author    Jeremy T. Bouse <jbouse@debian.org>  2009-11-27 16:20:09 -0500
committer Jeremy T. Bouse <jbouse@debian.org>  2009-11-27 16:20:09 -0500
commit    176c6caf4ea7918e1698438634b237fab8456471 (patch)
tree      6e2a8e5be1af2a6ec324fdbf99589aa099f1ec2a /paramiko
download  python-paramiko-176c6caf4ea7918e1698438634b237fab8456471.tar
          python-paramiko-176c6caf4ea7918e1698438634b237fab8456471.tar.gz
Imported Upstream version 1.5.2 (upstream/1.5.2)
Diffstat (limited to 'paramiko')
-rw-r--r--  paramiko/__init__.py       146
-rw-r--r--  paramiko/agent.py          138
-rw-r--r--  paramiko/auth_handler.py   410
-rw-r--r--  paramiko/ber.py            128
-rw-r--r--  paramiko/channel.py       1174
-rw-r--r--  paramiko/common.py         136
-rw-r--r--  paramiko/compress.py        39
-rw-r--r--  paramiko/dsskey.py         176
-rw-r--r--  paramiko/file.py           440
-rw-r--r--  paramiko/kex_gex.py        202
-rw-r--r--  paramiko/kex_group1.py     136
-rw-r--r--  paramiko/logging22.py       66
-rw-r--r--  paramiko/message.py        301
-rw-r--r--  paramiko/packet.py         442
-rw-r--r--  paramiko/pipe.py           105
-rw-r--r--  paramiko/pkey.py           339
-rw-r--r--  paramiko/primes.py         148
-rw-r--r--  paramiko/rsakey.py         165
-rw-r--r--  paramiko/server.py         527
-rw-r--r--  paramiko/sftp.py           168
-rw-r--r--  paramiko/sftp_attr.py      208
-rw-r--r--  paramiko/sftp_client.py    618
-rw-r--r--  paramiko/sftp_file.py      307
-rw-r--r--  paramiko/sftp_handle.py    188
-rw-r--r--  paramiko/sftp_server.py    420
-rw-r--r--  paramiko/sftp_si.py        303
-rw-r--r--  paramiko/ssh_exception.py   69
-rw-r--r--  paramiko/transport.py     1824
-rw-r--r--  paramiko/util.py           357
29 files changed, 9680 insertions, 0 deletions
diff --git a/paramiko/__init__.py b/paramiko/__init__.py
new file mode 100644
index 0000000..0a312cb
--- /dev/null
+++ b/paramiko/__init__.py
@@ -0,0 +1,146 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+I{Paramiko} (a combination of the Esperanto words for "paranoid" and "friend")
+is a module for python 2.3 or greater that implements the SSH2 protocol for
+secure (encrypted and authenticated) connections to remote machines. Unlike
+SSL (aka TLS), the SSH2 protocol does not require hierarchical certificates
+signed by a powerful central authority. You may know SSH2 as the protocol that
+replaced C{telnet} and C{rsh} for secure access to remote shells, but the
+protocol also includes the ability to open arbitrary channels to remote
+services across an encrypted tunnel. (This is how C{sftp} works, for example.)
+
+To use this package, pass a socket (or socket-like object) to a L{Transport},
+and use L{start_server <Transport.start_server>} or
+L{start_client <Transport.start_client>} to negotiate
+with the remote host as either a server or client. As a client, you are
+responsible for authenticating using a password or private key, and checking
+the server's host key. I{(Key signature and verification is done by paramiko,
+but you will need to provide private keys and check that the content of a
+public key matches what you expected to see.)} As a server, you are
+responsible for deciding which users, passwords, and keys to allow, and what
+kind of channels to allow.
+
+Once you have finished, either side may request flow-controlled L{Channel}s to
+the other side, which are python objects that act like sockets, but send and
+receive data over the encrypted session.
+
+Paramiko is written entirely in python (no C or platform-dependent code) and is
+released under the GNU Lesser General Public License (LGPL).
+
+Website: U{http://www.lag.net/paramiko/}
+
+@version: 1.5.2 (rhydon)
+@author: Robey Pointer
+@contact: robey@lag.net
+@license: GNU Lesser General Public License (LGPL)
+"""
+
+import sys
+
+if sys.version_info < (2, 2):
+ raise RuntimeError('You need python 2.2 for this module.')
+
+
+__author__ = "Robey Pointer <robey@lag.net>"
+__date__ = "04 Dec 2005"
+__version__ = "1.5.2 (rhydon)"
+__version_info__ = (1, 5, 2)
+__license__ = "GNU Lesser General Public License (LGPL)"
+
+
+import transport, auth_handler, channel, rsakey, dsskey, message
+import ssh_exception, file, packet, agent, server, util
+import sftp_client, sftp_attr, sftp_handle, sftp_server, sftp_si
+
+from transport import randpool, SecurityOptions, Transport
+from auth_handler import AuthHandler
+from channel import Channel, ChannelFile
+from ssh_exception import SSHException, PasswordRequiredException, BadAuthenticationType
+from server import ServerInterface, SubsystemHandler, InteractiveQuery
+from rsakey import RSAKey
+from dsskey import DSSKey
+from sftp import SFTPError, BaseSFTP
+from sftp_client import SFTP, SFTPClient
+from sftp_server import SFTPServer
+from sftp_attr import SFTPAttributes
+from sftp_handle import SFTPHandle
+from sftp_si import SFTPServerInterface
+from sftp_file import SFTPFile
+from message import Message
+from packet import Packetizer
+from file import BufferedFile
+from agent import Agent, AgentKey
+from pkey import PKey
+
+# fix module names for epydoc
+for x in [Transport, SecurityOptions, Channel, SFTPServer, SSHException, \
+ PasswordRequiredException, BadAuthenticationType, ChannelFile, \
+ SubsystemHandler, AuthHandler, RSAKey, DSSKey, SFTPError, \
+ SFTP, SFTPClient, SFTPServer, Message, Packetizer, SFTPAttributes, \
+ SFTPHandle, SFTPServerInterface, BufferedFile, Agent, AgentKey, \
+ PKey, BaseSFTP, SFTPFile, ServerInterface]:
+ x.__module__ = 'paramiko'
+
+from common import AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED, \
+ OPEN_SUCCEEDED, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, OPEN_FAILED_CONNECT_FAILED, \
+ OPEN_FAILED_UNKNOWN_CHANNEL_TYPE, OPEN_FAILED_RESOURCE_SHORTAGE
+
+from sftp import SFTP_OK, SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE, \
+ SFTP_BAD_MESSAGE, SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST, SFTP_OP_UNSUPPORTED
+
+__all__ = [ 'Transport',
+ 'SecurityOptions',
+ 'SubsystemHandler',
+ 'Channel',
+ 'RSAKey',
+ 'DSSKey',
+ 'Agent',
+ 'Message',
+ 'SSHException',
+ 'PasswordRequiredException',
+ 'BadAuthenticationType',
+ 'SFTP',
+ 'SFTPFile',
+ 'SFTPHandle',
+ 'SFTPClient',
+ 'SFTPServer',
+ 'SFTPError',
+ 'SFTPAttributes',
+            'SFTPServerInterface',
+ 'ServerInterface',
+ 'BufferedFile',
+ 'Agent',
+ 'AgentKey',
+ 'rsakey',
+ 'dsskey',
+ 'pkey',
+ 'message',
+ 'transport',
+ 'sftp',
+ 'sftp_client',
+ 'sftp_server',
+ 'sftp_attr',
+ 'sftp_file',
+ 'sftp_si',
+ 'sftp_handle',
+ 'server',
+ 'file',
+ 'agent',
+ 'util' ]
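
The module docstring above walks through the client workflow (wrap a socket in a Transport, negotiate, authenticate, then open channels). A minimal sketch of that workflow against this 1.5.2 API follows; the host, port, and credentials are placeholders, and a real client should also verify the server's host key.

import socket
import paramiko

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('ssh.example.com', 22))        # placeholder host/port

t = paramiko.Transport(sock)
t.start_client()                             # negotiate the SSH2 session
# a real client would compare t.get_remote_server_key() against known hosts here
t.auth_password('username', 'secret')        # placeholder credentials

chan = t.open_session()                      # a flow-controlled Channel
chan.exec_command('uptime')
print chan.recv(1024)
chan.close()
t.close()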
diff --git a/paramiko/agent.py b/paramiko/agent.py
new file mode 100644
index 0000000..3555512
--- /dev/null
+++ b/paramiko/agent.py
@@ -0,0 +1,138 @@
+# Copyright (C) 2003-2005 John Rochester <john@jrochester.org>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+SSH Agent interface for Unix clients.
+"""
+
+import os
+import socket
+import struct
+import sys
+
+from paramiko.ssh_exception import SSHException
+from paramiko.message import Message
+from paramiko.pkey import PKey
+
+
+SSH2_AGENTC_REQUEST_IDENTITIES, SSH2_AGENT_IDENTITIES_ANSWER, \
+ SSH2_AGENTC_SIGN_REQUEST, SSH2_AGENT_SIGN_RESPONSE = range(11, 15)
+
+
+class Agent:
+ """
+ Client interface for using private keys from an SSH agent running on the
+ local machine. If an SSH agent is running, this class can be used to
+    connect to it and retrieve L{PKey} objects which can be used when
+ attempting to authenticate to remote SSH servers.
+
+ Because the SSH agent protocol uses environment variables and unix-domain
+ sockets, this probably doesn't work on Windows. It does work on most
+ posix platforms though (Linux and MacOS X, for example).
+ """
+
+ def __init__(self):
+ """
+ Open a session with the local machine's SSH agent, if one is running.
+ If no agent is running, initialization will succeed, but L{get_keys}
+ will return an empty tuple.
+
+ @raise SSHException: if an SSH agent is found, but speaks an
+ incompatible protocol
+ """
+ if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'):
+ conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ conn.connect(os.environ['SSH_AUTH_SOCK'])
+ self.conn = conn
+ type, result = self._send_message(chr(SSH2_AGENTC_REQUEST_IDENTITIES))
+ if type != SSH2_AGENT_IDENTITIES_ANSWER:
+ raise SSHException('could not get keys from ssh-agent')
+ keys = []
+ for i in range(result.get_int()):
+ keys.append(AgentKey(self, result.get_string()))
+ result.get_string()
+ self.keys = tuple(keys)
+ else:
+ self.keys = ()
+
+ def close(self):
+ """
+ Close the SSH agent connection.
+ """
+ self.conn.close()
+ self.conn = None
+ self.keys = ()
+
+ def get_keys(self):
+ """
+ Return the list of keys available through the SSH agent, if any. If
+ no SSH agent was running (or it couldn't be contacted), an empty list
+ will be returned.
+
+ @return: a list of keys available on the SSH agent
+ @rtype: tuple of L{AgentKey}
+ """
+ return self.keys
+
+ def _send_message(self, msg):
+ msg = str(msg)
+ self.conn.send(struct.pack('>I', len(msg)) + msg)
+ l = self._read_all(4)
+ msg = Message(self._read_all(struct.unpack('>I', l)[0]))
+ return ord(msg.get_byte()), msg
+
+ def _read_all(self, wanted):
+ result = self.conn.recv(wanted)
+ while len(result) < wanted:
+ if len(result) == 0:
+ raise SSHException('lost ssh-agent')
+ extra = self.conn.recv(wanted - len(result))
+ if len(extra) == 0:
+ raise SSHException('lost ssh-agent')
+ result += extra
+ return result
+
+
+class AgentKey(PKey):
+ """
+ Private key held in a local SSH agent. This type of key can be used for
+ authenticating to a remote server (signing). Most other key operations
+ work as expected.
+ """
+
+ def __init__(self, agent, blob):
+ self.agent = agent
+ self.blob = blob
+ self.name = Message(blob).get_string()
+
+ def __str__(self):
+ return self.blob
+
+ def get_name(self):
+ return self.name
+
+ def sign_ssh_data(self, randpool, data):
+ msg = Message()
+ msg.add_byte(chr(SSH2_AGENTC_SIGN_REQUEST))
+ msg.add_string(self.blob)
+ msg.add_string(data)
+ msg.add_int(0)
+ type, result = self.agent._send_message(msg)
+ if type != SSH2_AGENT_SIGN_RESPONSE:
+ raise SSHException('key cannot be used for signing')
+ return result.get_string()
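
As a sketch of how the Agent class above is typically paired with a Transport: iterate over the agent's keys and try each one for public-key authentication. The transport t and the username here are assumptions (an already-negotiated Transport and a placeholder login name).

from paramiko import Agent
from paramiko.ssh_exception import SSHException

agent = Agent()                   # yields an empty key list if no agent is reachable
for key in agent.get_keys():      # AgentKey objects; signing is delegated to the agent
    try:
        t.auth_publickey('username', key)
        break                     # authenticated
    except SSHException:
        continue                  # try the next identity
agent.close()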
diff --git a/paramiko/auth_handler.py b/paramiko/auth_handler.py
new file mode 100644
index 0000000..59aa376
--- /dev/null
+++ b/paramiko/auth_handler.py
@@ -0,0 +1,410 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+L{AuthHandler}
+"""
+
+import threading
+
+# this helps freezing utils
+import encodings.utf_8
+
+from paramiko.common import *
+from paramiko import util
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException, BadAuthenticationType, PartialAuthentication
+from paramiko.server import InteractiveQuery
+
+
+class AuthHandler (object):
+ """
+ Internal class to handle the mechanics of authentication.
+ """
+
+ def __init__(self, transport):
+ self.transport = transport
+ self.username = None
+ self.authenticated = False
+ self.auth_event = None
+ self.auth_method = ''
+ self.password = None
+ self.private_key = None
+ # for server mode:
+ self.auth_username = None
+ self.auth_fail_count = 0
+
+ def is_authenticated(self):
+ return self.authenticated
+
+ def get_username(self):
+ if self.transport.server_mode:
+ return self.auth_username
+ else:
+ return self.username
+
+ def auth_none(self, username, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = 'none'
+ self.username = username
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_publickey(self, username, key, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = 'publickey'
+ self.username = username
+ self.private_key = key
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_password(self, username, password, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = 'password'
+ self.username = username
+ self.password = password
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_interactive(self, username, handler, event, submethods=''):
+ """
+ response_list = handler(title, instructions, prompt_list)
+ """
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = 'keyboard-interactive'
+ self.username = username
+ self.interactive_handler = handler
+ self.submethods = submethods
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def abort(self):
+ if self.auth_event is not None:
+ self.auth_event.set()
+
+
+ ### internals...
+
+
+ def _request_auth(self):
+ m = Message()
+ m.add_byte(chr(MSG_SERVICE_REQUEST))
+ m.add_string('ssh-userauth')
+ self.transport._send_message(m)
+
+ def _disconnect_service_not_available(self):
+ m = Message()
+ m.add_byte(chr(MSG_DISCONNECT))
+ m.add_int(DISCONNECT_SERVICE_NOT_AVAILABLE)
+ m.add_string('Service not available')
+ m.add_string('en')
+ self.transport._send_message(m)
+ self.transport.close()
+
+ def _disconnect_no_more_auth(self):
+ m = Message()
+ m.add_byte(chr(MSG_DISCONNECT))
+ m.add_int(DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE)
+ m.add_string('No more auth methods available')
+ m.add_string('en')
+ self.transport._send_message(m)
+ self.transport.close()
+
+ def _get_session_blob(self, key, service, username):
+ m = Message()
+ m.add_string(self.transport.session_id)
+ m.add_byte(chr(MSG_USERAUTH_REQUEST))
+ m.add_string(username)
+ m.add_string(service)
+ m.add_string('publickey')
+ m.add_boolean(1)
+ m.add_string(key.get_name())
+ m.add_string(str(key))
+ return str(m)
+
+ def wait_for_response(self, event):
+ while True:
+ event.wait(0.1)
+ if not self.transport.is_active():
+ e = self.transport.get_exception()
+ if e is None:
+ e = SSHException('Authentication failed.')
+ raise e
+ if event.isSet():
+ break
+ if not self.is_authenticated():
+ e = self.transport.get_exception()
+ if e is None:
+ e = SSHException('Authentication failed.')
+ # this is horrible. python Exception isn't yet descended from
+ # object, so type(e) won't work. :(
+ if issubclass(e.__class__, PartialAuthentication):
+ return e.allowed_types
+ raise e
+ return []
+
+ def _parse_service_request(self, m):
+ service = m.get_string()
+ if self.transport.server_mode and (service == 'ssh-userauth'):
+ # accepted
+ m = Message()
+ m.add_byte(chr(MSG_SERVICE_ACCEPT))
+ m.add_string(service)
+ self.transport._send_message(m)
+ return
+ # dunno this one
+ self._disconnect_service_not_available()
+
+ def _parse_service_accept(self, m):
+ service = m.get_string()
+ if service == 'ssh-userauth':
+ self.transport._log(DEBUG, 'userauth is OK')
+ m = Message()
+ m.add_byte(chr(MSG_USERAUTH_REQUEST))
+ m.add_string(self.username)
+ m.add_string('ssh-connection')
+ m.add_string(self.auth_method)
+ if self.auth_method == 'password':
+ m.add_boolean(False)
+ m.add_string(self.password.encode('UTF-8'))
+ elif self.auth_method == 'publickey':
+ m.add_boolean(True)
+ m.add_string(self.private_key.get_name())
+ m.add_string(str(self.private_key))
+ blob = self._get_session_blob(self.private_key, 'ssh-connection', self.username)
+ sig = self.private_key.sign_ssh_data(self.transport.randpool, blob)
+ m.add_string(str(sig))
+ elif self.auth_method == 'keyboard-interactive':
+ m.add_string('')
+ m.add_string(self.submethods)
+ elif self.auth_method == 'none':
+ pass
+ else:
+ raise SSHException('Unknown auth method "%s"' % self.auth_method)
+ self.transport._send_message(m)
+ else:
+ self.transport._log(DEBUG, 'Service request "%s" accepted (?)' % service)
+
+ def _send_auth_result(self, username, method, result):
+ # okay, send result
+ m = Message()
+ if result == AUTH_SUCCESSFUL:
+ self.transport._log(INFO, 'Auth granted (%s).' % method)
+ m.add_byte(chr(MSG_USERAUTH_SUCCESS))
+ self.authenticated = True
+ else:
+ self.transport._log(INFO, 'Auth rejected (%s).' % method)
+ m.add_byte(chr(MSG_USERAUTH_FAILURE))
+ m.add_string(self.transport.server_object.get_allowed_auths(username))
+ if result == AUTH_PARTIALLY_SUCCESSFUL:
+ m.add_boolean(1)
+ else:
+ m.add_boolean(0)
+ self.auth_fail_count += 1
+ self.transport._send_message(m)
+ if self.auth_fail_count >= 10:
+ self._disconnect_no_more_auth()
+ if result == AUTH_SUCCESSFUL:
+ self.transport._auth_trigger()
+
+ def _interactive_query(self, q):
+ # make interactive query instead of response
+ m = Message()
+ m.add_byte(chr(MSG_USERAUTH_INFO_REQUEST))
+ m.add_string(q.name)
+ m.add_string(q.instructions)
+ m.add_string('')
+ m.add_int(len(q.prompts))
+ for p in q.prompts:
+ m.add_string(p[0])
+ m.add_boolean(p[1])
+ self.transport._send_message(m)
+
+ def _parse_userauth_request(self, m):
+ if not self.transport.server_mode:
+ # er, uh... what?
+ m = Message()
+ m.add_byte(chr(MSG_USERAUTH_FAILURE))
+ m.add_string('none')
+ m.add_boolean(0)
+ self.transport._send_message(m)
+ return
+ if self.authenticated:
+ # ignore
+ return
+ username = m.get_string()
+ service = m.get_string()
+ method = m.get_string()
+ self.transport._log(DEBUG, 'Auth request (type=%s) service=%s, username=%s' % (method, service, username))
+ if service != 'ssh-connection':
+ self._disconnect_service_not_available()
+ return
+ if (self.auth_username is not None) and (self.auth_username != username):
+ self.transport._log(WARNING, 'Auth rejected because the client attempted to change username in mid-flight')
+ self._disconnect_no_more_auth()
+ return
+ self.auth_username = username
+
+ if method == 'none':
+ result = self.transport.server_object.check_auth_none(username)
+ elif method == 'password':
+ changereq = m.get_boolean()
+ password = m.get_string().decode('UTF-8', 'replace')
+ if changereq:
+ # always treated as failure, since we don't support changing passwords, but collect
+ # the list of valid auth types from the callback anyway
+ self.transport._log(DEBUG, 'Auth request to change passwords (rejected)')
+ newpassword = m.get_string().decode('UTF-8', 'replace')
+ result = AUTH_FAILED
+ else:
+ result = self.transport.server_object.check_auth_password(username, password)
+ elif method == 'publickey':
+ sig_attached = m.get_boolean()
+ keytype = m.get_string()
+ keyblob = m.get_string()
+ try:
+ key = self.transport._key_info[keytype](Message(keyblob))
+ except SSHException, e:
+ self.transport._log(INFO, 'Auth rejected: public key: %s' % str(e))
+ key = None
+ except:
+ self.transport._log(INFO, 'Auth rejected: unsupported or mangled public key')
+ key = None
+ if key is None:
+ self._disconnect_no_more_auth()
+ return
+ # first check if this key is okay... if not, we can skip the verify
+ result = self.transport.server_object.check_auth_publickey(username, key)
+ if result != AUTH_FAILED:
+ # key is okay, verify it
+ if not sig_attached:
+ # client wants to know if this key is acceptable, before it
+ # signs anything... send special "ok" message
+ m = Message()
+ m.add_byte(chr(MSG_USERAUTH_PK_OK))
+ m.add_string(keytype)
+ m.add_string(keyblob)
+ self.transport._send_message(m)
+ return
+ sig = Message(m.get_string())
+ blob = self._get_session_blob(key, service, username)
+ if not key.verify_ssh_sig(blob, sig):
+ self.transport._log(INFO, 'Auth rejected: invalid signature')
+ result = AUTH_FAILED
+ elif method == 'keyboard-interactive':
+ lang = m.get_string()
+ submethods = m.get_string()
+ result = self.transport.server_object.check_auth_interactive(username, submethods)
+ if isinstance(result, InteractiveQuery):
+ # make interactive query instead of response
+ self._interactive_query(result)
+ return
+ else:
+ result = self.transport.server_object.check_auth_none(username)
+ # okay, send result
+ self._send_auth_result(username, method, result)
+
+ def _parse_userauth_success(self, m):
+ self.transport._log(INFO, 'Authentication successful!')
+ self.authenticated = True
+ self.transport._auth_trigger()
+ if self.auth_event != None:
+ self.auth_event.set()
+
+ def _parse_userauth_failure(self, m):
+ authlist = m.get_list()
+ partial = m.get_boolean()
+ if partial:
+ self.transport._log(INFO, 'Authentication continues...')
+ self.transport._log(DEBUG, 'Methods: ' + str(authlist))
+ self.transport.saved_exception = PartialAuthentication(authlist)
+ elif self.auth_method not in authlist:
+ self.transport._log(INFO, 'Authentication type not permitted.')
+ self.transport._log(DEBUG, 'Allowed methods: ' + str(authlist))
+ self.transport.saved_exception = BadAuthenticationType('Bad authentication type', authlist)
+ else:
+ self.transport._log(INFO, 'Authentication failed.')
+ self.authenticated = False
+ self.username = None
+ if self.auth_event != None:
+ self.auth_event.set()
+
+ def _parse_userauth_banner(self, m):
+ banner = m.get_string()
+ lang = m.get_string()
+ self.transport._log(INFO, 'Auth banner: ' + banner)
+ # who cares.
+
+ def _parse_userauth_info_request(self, m):
+ if self.auth_method != 'keyboard-interactive':
+ raise SSHException('Illegal info request from server')
+ title = m.get_string()
+ instructions = m.get_string()
+ m.get_string() # lang
+ prompts = m.get_int()
+ prompt_list = []
+ for i in range(prompts):
+ prompt_list.append((m.get_string(), m.get_boolean()))
+ response_list = self.interactive_handler(title, instructions, prompt_list)
+
+ m = Message()
+ m.add_byte(chr(MSG_USERAUTH_INFO_RESPONSE))
+ m.add_int(len(response_list))
+ for r in response_list:
+ m.add_string(r)
+ self.transport._send_message(m)
+
+ def _parse_userauth_info_response(self, m):
+ if not self.transport.server_mode:
+ raise SSHException('Illegal info response from server')
+ n = m.get_int()
+ responses = []
+ for i in range(n):
+ responses.append(m.get_string())
+ result = self.transport.server_object.check_auth_interactive_response(responses)
+        if isinstance(result, InteractiveQuery):
+ # make interactive query instead of response
+ self._interactive_query(result)
+ return
+ self._send_auth_result(self.auth_username, 'keyboard-interactive', result)
+
+
+ _handler_table = {
+ MSG_SERVICE_REQUEST: _parse_service_request,
+ MSG_SERVICE_ACCEPT: _parse_service_accept,
+ MSG_USERAUTH_REQUEST: _parse_userauth_request,
+ MSG_USERAUTH_SUCCESS: _parse_userauth_success,
+ MSG_USERAUTH_FAILURE: _parse_userauth_failure,
+ MSG_USERAUTH_BANNER: _parse_userauth_banner,
+ MSG_USERAUTH_INFO_REQUEST: _parse_userauth_info_request,
+ MSG_USERAUTH_INFO_RESPONSE: _parse_userauth_info_response,
+ }
+
+
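
The auth_interactive docstring above only states the handler contract, response_list = handler(title, instructions, prompt_list), where each prompt is a (text, echo) pair as assembled in _parse_userauth_info_request. A hedged sketch of such a handler:

import getpass

def interactive_handler(title, instructions, prompt_list):
    # each entry is (prompt_text, echo); return one response string per prompt
    responses = []
    for prompt, echo in prompt_list:
        if echo:
            responses.append(raw_input(prompt))
        else:
            responses.append(getpass.getpass(prompt))
    return responses

# typically handed to the client side via Transport.auth_interactive(username, interactive_handler)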
diff --git a/paramiko/ber.py b/paramiko/ber.py
new file mode 100644
index 0000000..6a7823d
--- /dev/null
+++ b/paramiko/ber.py
@@ -0,0 +1,128 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+import struct
+import util
+
+
+class BERException (Exception):
+ pass
+
+
+class BER(object):
+ """
+ Robey's tiny little attempt at a BER decoder.
+ """
+
+ def __init__(self, content=''):
+ self.content = content
+ self.idx = 0
+
+ def __str__(self):
+ return self.content
+
+ def __repr__(self):
+ return 'BER(\'' + repr(self.content) + '\')'
+
+ def decode(self):
+ return self.decode_next()
+
+ def decode_next(self):
+ if self.idx >= len(self.content):
+ return None
+ ident = ord(self.content[self.idx])
+ self.idx += 1
+ if (ident & 31) == 31:
+ # identifier > 30
+ ident = 0
+ while self.idx < len(self.content):
+ t = ord(self.content[self.idx])
+ self.idx += 1
+ ident = (ident << 7) | (t & 0x7f)
+ if not (t & 0x80):
+ break
+ if self.idx >= len(self.content):
+ return None
+ # now fetch length
+ size = ord(self.content[self.idx])
+ self.idx += 1
+ if size & 0x80:
+            # more complicated...
+ # FIXME: theoretically should handle indefinite-length (0x80)
+ t = size & 0x7f
+ if self.idx + t > len(self.content):
+ return None
+ size = util.inflate_long(self.content[self.idx : self.idx + t], True)
+ self.idx += t
+ if self.idx + size > len(self.content):
+ # can't fit
+ return None
+ data = self.content[self.idx : self.idx + size]
+ self.idx += size
+ # now switch on id
+ if ident == 0x30:
+ # sequence
+ return self.decode_sequence(data)
+ elif ident == 2:
+ # int
+ return util.inflate_long(data)
+ else:
+ # 1: boolean (00 false, otherwise true)
+ raise BERException('Unknown ber encoding type %d (robey is lazy)' % ident)
+
+ def decode_sequence(data):
+ out = []
+ b = BER(data)
+ while True:
+ x = b.decode_next()
+ if x is None:
+ return out
+ out.append(x)
+ decode_sequence = staticmethod(decode_sequence)
+
+ def encode_tlv(self, ident, val):
+ # no need to support ident > 31 here
+ self.content += chr(ident)
+ if len(val) > 0x7f:
+ lenstr = util.deflate_long(len(val))
+ self.content += chr(0x80 + len(lenstr)) + lenstr
+ else:
+ self.content += chr(len(val))
+ self.content += val
+
+ def encode(self, x):
+ if type(x) is bool:
+ if x:
+ self.encode_tlv(1, '\xff')
+ else:
+ self.encode_tlv(1, '\x00')
+ elif (type(x) is int) or (type(x) is long):
+ self.encode_tlv(2, util.deflate_long(x))
+ elif type(x) is str:
+ self.encode_tlv(4, x)
+ elif (type(x) is list) or (type(x) is tuple):
+ self.encode_tlv(0x30, self.encode_sequence(x))
+ else:
+ raise BERException('Unknown type for encoding: %s' % repr(type(x)))
+
+ def encode_sequence(data):
+ b = BER()
+ for item in data:
+ b.encode(item)
+ return str(b)
+ encode_sequence = staticmethod(encode_sequence)
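
For reference, a tiny round trip through the BER helper above (illustrative only; note that the decoder handles just the INTEGER and SEQUENCE types paramiko's key formats need):

from paramiko.ber import BER

blob = BER.encode_sequence([1, 2, 65537])    # concatenated INTEGER TLVs
print BER.decode_sequence(blob)              # -> [1L, 2L, 65537L]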
diff --git a/paramiko/channel.py b/paramiko/channel.py
new file mode 100644
index 0000000..8a00233
--- /dev/null
+++ b/paramiko/channel.py
@@ -0,0 +1,1174 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Abstraction for an SSH2 channel.
+"""
+
+import sys
+import time
+import threading
+import socket
+import os
+
+from paramiko.common import *
+from paramiko import util
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+from paramiko.file import BufferedFile
+from paramiko import pipe
+
+
+class Channel (object):
+ """
+ A secure tunnel across an SSH L{Transport}. A Channel is meant to behave
+ like a socket, and has an API that should be indistinguishable from the
+ python socket API.
+
+ Because SSH2 has a windowing kind of flow control, if you stop reading data
+ from a Channel and its buffer fills up, the server will be unable to send
+ you any more data until you read some of it. (This won't affect other
+ channels on the same transport -- all channels on a single transport are
+ flow-controlled independently.) Similarly, if the server isn't reading
+ data you send, calls to L{send} may block, unless you set a timeout. This
+ is exactly like a normal network socket, so it shouldn't be too surprising.
+ """
+
+ # lower bound on the max packet size we'll accept from the remote host
+ MIN_PACKET_SIZE = 1024
+
+ def __init__(self, chanid):
+ """
+ Create a new channel. The channel is not associated with any
+ particular session or L{Transport} until the Transport attaches it.
+ Normally you would only call this method from the constructor of a
+ subclass of L{Channel}.
+
+ @param chanid: the ID of this channel, as passed by an existing
+ L{Transport}.
+ @type chanid: int
+ """
+ self.chanid = chanid
+ self.remote_chanid = 0
+ self.transport = None
+ self.active = False
+ self.eof_received = 0
+ self.eof_sent = 0
+ self.in_buffer = ''
+ self.in_stderr_buffer = ''
+ self.timeout = None
+ self.closed = False
+ self.ultra_debug = False
+ self.lock = threading.Lock()
+ self.in_buffer_cv = threading.Condition(self.lock)
+ self.in_stderr_buffer_cv = threading.Condition(self.lock)
+ self.out_buffer_cv = threading.Condition(self.lock)
+ self.in_window_size = 0
+ self.out_window_size = 0
+ self.in_max_packet_size = 0
+ self.out_max_packet_size = 0
+ self.in_window_threshold = 0
+ self.in_window_sofar = 0
+ self.status_event = threading.Event()
+ self.name = str(chanid)
+ self.logger = util.get_logger('paramiko.chan.' + str(chanid))
+ self.pipe = None
+ self.event = threading.Event()
+ self.combine_stderr = False
+ self.exit_status = -1
+
+ def __del__(self):
+ self.close()
+
+ def __repr__(self):
+ """
+ Return a string representation of this object, for debugging.
+
+ @rtype: str
+ """
+ out = '<paramiko.Channel %d' % self.chanid
+ if self.closed:
+ out += ' (closed)'
+ elif self.active:
+ if self.eof_received:
+ out += ' (EOF received)'
+ if self.eof_sent:
+ out += ' (EOF sent)'
+ out += ' (open) window=%d' % (self.out_window_size)
+ if len(self.in_buffer) > 0:
+ out += ' in-buffer=%d' % (len(self.in_buffer),)
+ out += ' -> ' + repr(self.transport)
+ out += '>'
+ return out
+
+ def get_pty(self, term='vt100', width=80, height=24):
+ """
+ Request a pseudo-terminal from the server. This is usually used right
+ after creating a client channel, to ask the server to provide some
+ basic terminal semantics for a shell invoked with L{invoke_shell}.
+ It isn't necessary (or desirable) to call this method if you're going
+        to execute a single command with L{exec_command}.
+
+ @param term: the terminal type to emulate (for example, C{'vt100'}).
+ @type term: str
+ @param width: width (in characters) of the terminal screen
+ @type width: int
+ @param height: height (in characters) of the terminal screen
+ @type height: int
+ @return: C{True} if the operation succeeded; C{False} if not.
+ @rtype: bool
+ """
+ if self.closed or self.eof_received or self.eof_sent or not self.active:
+ raise SSHException('Channel is not open')
+ m = Message()
+ m.add_byte(chr(MSG_CHANNEL_REQUEST))
+ m.add_int(self.remote_chanid)
+ m.add_string('pty-req')
+ m.add_boolean(True)
+ m.add_string(term)
+ m.add_int(width)
+ m.add_int(height)
+ # pixel height, width (usually useless)
+ m.add_int(0).add_int(0)
+ m.add_string('')
+ self.event.clear()
+ self.transport._send_user_message(m)
+ while True:
+ self.event.wait(0.1)
+ if self.closed:
+ return False
+ if self.event.isSet():
+ return True
+
+ def invoke_shell(self):
+ """
+ Request an interactive shell session on this channel. If the server
+ allows it, the channel will then be directly connected to the stdin,
+ stdout, and stderr of the shell.
+
+ Normally you would call L{get_pty} before this, in which case the
+ shell will operate through the pty, and the channel will be connected
+ to the stdin and stdout of the pty.
+
+ When the shell exits, the channel will be closed and can't be reused.
+ You must open a new channel if you wish to open another shell.
+
+ @return: C{True} if the operation succeeded; C{False} if not.
+ @rtype: bool
+ """
+ if self.closed or self.eof_received or self.eof_sent or not self.active:
+ raise SSHException('Channel is not open')
+ m = Message()
+ m.add_byte(chr(MSG_CHANNEL_REQUEST))
+ m.add_int(self.remote_chanid)
+ m.add_string('shell')
+ m.add_boolean(1)
+ self.event.clear()
+ self.transport._send_user_message(m)
+ while True:
+ self.event.wait(0.1)
+ if self.closed:
+ return False
+ if self.event.isSet():
+ return True
+
+ def exec_command(self, command):
+ """
+ Execute a command on the server. If the server allows it, the channel
+ will then be directly connected to the stdin, stdout, and stderr of
+ the command being executed.
+
+ When the command finishes executing, the channel will be closed and
+ can't be reused. You must open a new channel if you wish to execute
+ another command.
+
+ @param command: a shell command to execute.
+ @type command: str
+ @return: C{True} if the operation succeeded; C{False} if not.
+ @rtype: bool
+ """
+ if self.closed or self.eof_received or self.eof_sent or not self.active:
+ raise SSHException('Channel is not open')
+ m = Message()
+ m.add_byte(chr(MSG_CHANNEL_REQUEST))
+ m.add_int(self.remote_chanid)
+ m.add_string('exec')
+ m.add_boolean(1)
+ m.add_string(command)
+ self.event.clear()
+ self.transport._send_user_message(m)
+ while True:
+ self.event.wait(0.1)
+ if self.closed:
+ return False
+ if self.event.isSet():
+ return True
+
+ def invoke_subsystem(self, subsystem):
+ """
+ Request a subsystem on the server (for example, C{sftp}). If the
+ server allows it, the channel will then be directly connected to the
+ requested subsystem.
+
+ When the subsystem finishes, the channel will be closed and can't be
+ reused.
+
+ @param subsystem: name of the subsystem being requested.
+ @type subsystem: str
+ @return: C{True} if the operation succeeded; C{False} if not.
+ @rtype: bool
+ """
+ if self.closed or self.eof_received or self.eof_sent or not self.active:
+ raise SSHException('Channel is not open')
+ m = Message()
+ m.add_byte(chr(MSG_CHANNEL_REQUEST))
+ m.add_int(self.remote_chanid)
+ m.add_string('subsystem')
+ m.add_boolean(1)
+ m.add_string(subsystem)
+ self.event.clear()
+ self.transport._send_user_message(m)
+ while True:
+ self.event.wait(0.1)
+ if self.closed:
+ return False
+ if self.event.isSet():
+ return True
+
+ def resize_pty(self, width=80, height=24):
+ """
+ Resize the pseudo-terminal. This can be used to change the width and
+ height of the terminal emulation created in a previous L{get_pty} call.
+
+ @param width: new width (in characters) of the terminal screen
+ @type width: int
+ @param height: new height (in characters) of the terminal screen
+ @type height: int
+ @return: C{True} if the operation succeeded; C{False} if not.
+ @rtype: bool
+ """
+ if self.closed or self.eof_received or self.eof_sent or not self.active:
+ raise SSHException('Channel is not open')
+ m = Message()
+ m.add_byte(chr(MSG_CHANNEL_REQUEST))
+ m.add_int(self.remote_chanid)
+ m.add_string('window-change')
+ m.add_boolean(1)
+ m.add_int(width)
+ m.add_int(height)
+ m.add_int(0).add_int(0)
+ self.event.clear()
+ self.transport._send_user_message(m)
+ while True:
+ self.event.wait(0.1)
+ if self.closed:
+ return False
+ if self.event.isSet():
+ return True
+
+ def recv_exit_status(self):
+ """
+ Return the exit status from the process on the server. This is
+        mostly useful for retrieving the results of an L{exec_command}.
+ If the command hasn't finished yet, this method will wait until
+ it does, or until the channel is closed. If no exit status is
+ provided by the server, -1 is returned.
+
+ @return: the exit code of the process on the server.
+ @rtype: int
+
+ @since: 1.2
+ """
+ while True:
+ if self.closed or self.status_event.isSet():
+ return self.exit_status
+ self.status_event.wait(0.1)
+
+ def send_exit_status(self, status):
+ """
+ Send the exit status of an executed command to the client. (This
+ really only makes sense in server mode.) Many clients expect to
+ get some sort of status code back from an executed command after
+ it completes.
+
+ @param status: the exit code of the process
+ @type status: int
+
+ @since: 1.2
+ """
+ # in many cases, the channel will not still be open here.
+ # that's fine.
+ m = Message()
+ m.add_byte(chr(MSG_CHANNEL_REQUEST))
+ m.add_int(self.remote_chanid)
+ m.add_string('exit-status')
+ m.add_boolean(0)
+ m.add_int(status)
+ self.transport._send_user_message(m)
+
+ def get_transport(self):
+ """
+ Return the L{Transport} associated with this channel.
+
+ @return: the L{Transport} that was used to create this channel.
+ @rtype: L{Transport}
+ """
+ return self.transport
+
+ def set_name(self, name):
+ """
+ Set a name for this channel. Currently it's only used to set the name
+ of the log level used for debugging. The name can be fetched with the
+ L{get_name} method.
+
+ @param name: new channel name.
+ @type name: str
+ """
+ self.name = name
+ self.logger = util.get_logger(self.transport.get_log_channel() + '.' + self.name)
+
+ def get_name(self):
+ """
+ Get the name of this channel that was previously set by L{set_name}.
+
+ @return: the name of this channel.
+ @rtype: str
+ """
+ return self.name
+
+ def get_id(self):
+ """
+ Return the ID # for this channel. The channel ID is unique across
+ a L{Transport} and usually a small number. It's also the number
+ passed to L{ServerInterface.check_channel_request} when determining
+ whether to accept a channel request in server mode.
+
+ @return: the ID of this channel.
+ @rtype: int
+
+ @since: ivysaur
+ """
+ return self.chanid
+
+ def set_combine_stderr(self, combine):
+ """
+ Set whether stderr should be combined into stdout on this channel.
+ The default is C{False}, but in some cases it may be convenient to
+ have both streams combined.
+
+ If this is C{False}, and L{exec_command} is called (or C{invoke_shell}
+ with no pty), output to stderr will not show up through the L{recv}
+ and L{recv_ready} calls. You will have to use L{recv_stderr} and
+ L{recv_stderr_ready} to get stderr output.
+
+ If this is C{True}, data will never show up via L{recv_stderr} or
+ L{recv_stderr_ready}.
+
+ @param combine: C{True} if stderr output should be combined into
+ stdout on this channel.
+ @type combine: bool
+ @return: previous setting.
+ @rtype: bool
+
+ @since: 1.1
+ """
+ data = ''
+ self.lock.acquire()
+ try:
+ old = self.combine_stderr
+ self.combine_stderr = combine
+ if combine and not old:
+ # copy old stderr buffer into primary buffer
+ data = self.in_stderr_buffer
+ self.in_stderr_buffer = ''
+ finally:
+ self.lock.release()
+ if len(data) > 0:
+ self._feed(data)
+ return old
+
+
+ ### socket API
+
+
+ def settimeout(self, timeout):
+ """
+ Set a timeout on blocking read/write operations. The C{timeout}
+ argument can be a nonnegative float expressing seconds, or C{None}. If
+ a float is given, subsequent channel read/write operations will raise
+ a timeout exception if the timeout period value has elapsed before the
+ operation has completed. Setting a timeout of C{None} disables
+ timeouts on socket operations.
+
+ C{chan.settimeout(0.0)} is equivalent to C{chan.setblocking(0)};
+ C{chan.settimeout(None)} is equivalent to C{chan.setblocking(1)}.
+
+ @param timeout: seconds to wait for a pending read/write operation
+ before raising C{socket.timeout}, or C{None} for no timeout.
+ @type timeout: float
+ """
+ self.timeout = timeout
+
+ def gettimeout(self):
+ """
+ Returns the timeout in seconds (as a float) associated with socket
+ operations, or C{None} if no timeout is set. This reflects the last
+ call to L{setblocking} or L{settimeout}.
+
+ @return: timeout in seconds, or C{None}.
+ @rtype: float
+ """
+ return self.timeout
+
+ def setblocking(self, blocking):
+ """
+ Set blocking or non-blocking mode of the channel: if C{blocking} is 0,
+ the channel is set to non-blocking mode; otherwise it's set to blocking
+ mode. Initially all channels are in blocking mode.
+
+ In non-blocking mode, if a L{recv} call doesn't find any data, or if a
+ L{send} call can't immediately dispose of the data, an error exception
+ is raised. In blocking mode, the calls block until they can proceed.
+
+ C{chan.setblocking(0)} is equivalent to C{chan.settimeout(0)};
+ C{chan.setblocking(1)} is equivalent to C{chan.settimeout(None)}.
+
+ @param blocking: 0 to set non-blocking mode; non-0 to set blocking
+ mode.
+ @type blocking: int
+ """
+ if blocking:
+ self.settimeout(None)
+ else:
+ self.settimeout(0.0)
+
+ def close(self):
+ """
+ Close the channel. All future read/write operations on the channel
+ will fail. The remote end will receive no more data (after queued data
+ is flushed). Channels are automatically closed when their L{Transport}
+ is closed or when they are garbage collected.
+ """
+ self.lock.acquire()
+ try:
+ if not self.active or self.closed:
+ return
+ msgs = self._close_internal()
+
+ # only close the pipe when the user explicitly closes the channel.
+ # otherwise they will get unpleasant surprises.
+ if self.pipe is not None:
+ self.pipe.close()
+ self.pipe = None
+ finally:
+ self.lock.release()
+ for m in msgs:
+ if m is not None:
+ self.transport._send_user_message(m)
+
+ def recv_ready(self):
+ """
+ Returns true if data is buffered and ready to be read from this
+ channel. A C{False} result does not mean that the channel has closed;
+ it means you may need to wait before more data arrives.
+
+ @return: C{True} if a L{recv} call on this channel would immediately
+ return at least one byte; C{False} otherwise.
+ @rtype: boolean
+ """
+ self.lock.acquire()
+ try:
+ if len(self.in_buffer) == 0:
+ return False
+ return True
+ finally:
+ self.lock.release()
+
+ def recv(self, nbytes):
+ """
+ Receive data from the channel. The return value is a string
+ representing the data received. The maximum amount of data to be
+ received at once is specified by C{nbytes}. If a string of length zero
+ is returned, the channel stream has closed.
+
+ @param nbytes: maximum number of bytes to read.
+ @type nbytes: int
+ @return: data.
+ @rtype: str
+
+ @raise socket.timeout: if no data is ready before the timeout set by
+ L{settimeout}.
+ """
+ out = ''
+ self.lock.acquire()
+ try:
+ if len(self.in_buffer) == 0:
+ if self.closed or self.eof_received:
+ return out
+ # should we block?
+ if self.timeout == 0.0:
+ raise socket.timeout()
+ # loop here in case we get woken up but a different thread has grabbed everything in the buffer
+ timeout = self.timeout
+ while (len(self.in_buffer) == 0) and not self.closed and not self.eof_received:
+ then = time.time()
+ self.in_buffer_cv.wait(timeout)
+ if timeout != None:
+ timeout -= time.time() - then
+ if timeout <= 0.0:
+ raise socket.timeout()
+ # something in the buffer and we have the lock
+ if len(self.in_buffer) <= nbytes:
+ out = self.in_buffer
+ self.in_buffer = ''
+ if self.pipe is not None:
+ # clear the pipe, since no more data is buffered
+ self.pipe.clear()
+ else:
+ out = self.in_buffer[:nbytes]
+ self.in_buffer = self.in_buffer[nbytes:]
+ ack = self._check_add_window(len(out))
+ finally:
+ self.lock.release()
+
+ # no need to hold the channel lock when sending this
+ if ack > 0:
+ m = Message()
+ m.add_byte(chr(MSG_CHANNEL_WINDOW_ADJUST))
+ m.add_int(self.remote_chanid)
+ m.add_int(ack)
+ self.transport._send_user_message(m)
+
+ return out
+
+ def recv_stderr_ready(self):
+ """
+ Returns true if data is buffered and ready to be read from this
+ channel's stderr stream. Only channels using L{exec_command} or
+ L{invoke_shell} without a pty will ever have data on the stderr
+ stream.
+
+ @return: C{True} if a L{recv_stderr} call on this channel would
+ immediately return at least one byte; C{False} otherwise.
+ @rtype: boolean
+
+ @since: 1.1
+ """
+ self.lock.acquire()
+ try:
+ if len(self.in_stderr_buffer) == 0:
+ return False
+ return True
+ finally:
+ self.lock.release()
+
+ def recv_stderr(self, nbytes):
+ """
+ Receive data from the channel's stderr stream. Only channels using
+ L{exec_command} or L{invoke_shell} without a pty will ever have data
+ on the stderr stream. The return value is a string representing the
+ data received. The maximum amount of data to be received at once is
+ specified by C{nbytes}. If a string of length zero is returned, the
+ channel stream has closed.
+
+ @param nbytes: maximum number of bytes to read.
+ @type nbytes: int
+ @return: data.
+ @rtype: str
+
+ @raise socket.timeout: if no data is ready before the timeout set by
+ L{settimeout}.
+
+ @since: 1.1
+ """
+ out = ''
+ self.lock.acquire()
+ try:
+ if len(self.in_stderr_buffer) == 0:
+ if self.closed or self.eof_received:
+ return out
+ # should we block?
+ if self.timeout == 0.0:
+ raise socket.timeout()
+ # loop here in case we get woken up but a different thread has grabbed everything in the buffer
+ timeout = self.timeout
+ while (len(self.in_stderr_buffer) == 0) and not self.closed and not self.eof_received:
+ then = time.time()
+ self.in_stderr_buffer_cv.wait(timeout)
+ if timeout != None:
+ timeout -= time.time() - then
+ if timeout <= 0.0:
+ raise socket.timeout()
+ # something in the buffer and we have the lock
+ if len(self.in_stderr_buffer) <= nbytes:
+ out = self.in_stderr_buffer
+ self.in_stderr_buffer = ''
+ else:
+ out = self.in_stderr_buffer[:nbytes]
+ self.in_stderr_buffer = self.in_stderr_buffer[nbytes:]
+ self._check_add_window(len(out))
+ finally:
+ self.lock.release()
+ return out
+
+ def send(self, s):
+ """
+ Send data to the channel. Returns the number of bytes sent, or 0 if
+ the channel stream is closed. Applications are responsible for
+ checking that all data has been sent: if only some of the data was
+ transmitted, the application needs to attempt delivery of the remaining
+ data.
+
+ @param s: data to send.
+ @type s: str
+ @return: number of bytes actually sent.
+ @rtype: int
+
+ @raise socket.timeout: if no data could be sent before the timeout set
+ by L{settimeout}.
+ """
+ size = len(s)
+ self.lock.acquire()
+ try:
+ size = self._wait_for_send_window(size)
+ if size == 0:
+ # eof or similar
+ return 0
+ m = Message()
+ m.add_byte(chr(MSG_CHANNEL_DATA))
+ m.add_int(self.remote_chanid)
+ m.add_string(s[:size])
+ self.transport._send_user_message(m)
+ finally:
+ self.lock.release()
+ return size
+
+ def send_stderr(self, s):
+ """
+ Send data to the channel on the "stderr" stream. This is normally
+ only used by servers to send output from shell commands -- clients
+ won't use this. Returns the number of bytes sent, or 0 if the channel
+ stream is closed. Applications are responsible for checking that all
+ data has been sent: if only some of the data was transmitted, the
+ application needs to attempt delivery of the remaining data.
+
+ @param s: data to send.
+ @type s: str
+ @return: number of bytes actually sent.
+ @rtype: int
+
+ @raise socket.timeout: if no data could be sent before the timeout set
+ by L{settimeout}.
+
+ @since: 1.1
+ """
+ size = len(s)
+ self.lock.acquire()
+ try:
+ size = self._wait_for_send_window(size)
+ if size == 0:
+ # eof or similar
+ return 0
+ m = Message()
+ m.add_byte(chr(MSG_CHANNEL_EXTENDED_DATA))
+ m.add_int(self.remote_chanid)
+ m.add_int(1)
+ m.add_string(s[:size])
+ self.transport._send_user_message(m)
+ finally:
+ self.lock.release()
+ return size
+
+ def sendall(self, s):
+ """
+ Send data to the channel, without allowing partial results. Unlike
+ L{send}, this method continues to send data from the given string until
+ either all data has been sent or an error occurs. Nothing is returned.
+
+ @param s: data to send.
+ @type s: str
+
+ @raise socket.timeout: if sending stalled for longer than the timeout
+ set by L{settimeout}.
+        @raise socket.error: if an error occurred before the entire string was
+ sent.
+
+        @note: If the channel is closed while only part of the data has been
+ sent, there is no way to determine how much data (if any) was sent.
+ This is irritating, but identically follows python's API.
+ """
+ while s:
+ if self.closed:
+ # this doesn't seem useful, but it is the documented behavior of Socket
+ raise socket.error('Socket is closed')
+ sent = self.send(s)
+ s = s[sent:]
+ return None
+
+ def sendall_stderr(self, s):
+ """
+ Send data to the channel's "stderr" stream, without allowing partial
+ results. Unlike L{send_stderr}, this method continues to send data
+ from the given string until all data has been sent or an error occurs.
+ Nothing is returned.
+
+ @param s: data to send to the client as "stderr" output.
+ @type s: str
+
+ @raise socket.timeout: if sending stalled for longer than the timeout
+ set by L{settimeout}.
+        @raise socket.error: if an error occurred before the entire string was
+ sent.
+
+ @since: 1.1
+ """
+ while s:
+ if self.closed:
+ raise socket.error('Socket is closed')
+ sent = self.send_stderr(s)
+ s = s[sent:]
+ return None
+
+ def makefile(self, *params):
+ """
+ Return a file-like object associated with this channel. The optional
+ C{mode} and C{bufsize} arguments are interpreted the same way as by
+ the built-in C{file()} function in python.
+
+ @return: object which can be used for python file I/O.
+ @rtype: L{ChannelFile}
+ """
+ return ChannelFile(*([self] + list(params)))
+
+ def makefile_stderr(self, *params):
+ """
+ Return a file-like object associated with this channel's stderr
+ stream. Only channels using L{exec_command} or L{invoke_shell}
+ without a pty will ever have data on the stderr stream.
+
+ The optional C{mode} and C{bufsize} arguments are interpreted the
+ same way as by the built-in C{file()} function in python. For a
+ client, it only makes sense to open this file for reading. For a
+ server, it only makes sense to open this file for writing.
+
+ @return: object which can be used for python file I/O.
+ @rtype: L{ChannelFile}
+
+ @since: 1.1
+ """
+ return ChannelStderrFile(*([self] + list(params)))
+
+ def fileno(self):
+ """
+        Returns an OS-level file descriptor which can be used for polling (but
+        I{not} for reading or writing). This is primarily to allow python's
+ C{select} module to work.
+
+ The first time C{fileno} is called on a channel, a pipe is created to
+ simulate real OS-level file descriptor (FD) behavior. Because of this,
+ two OS-level FDs are created, which will use up FDs faster than normal.
+ You won't notice this effect unless you open hundreds or thousands of
+ channels simultaneously, but it's still notable.
+
+ @return: an OS-level file descriptor
+ @rtype: int
+
+ @warning: This method causes channel reads to be slightly less
+ efficient.
+ """
+ self.lock.acquire()
+ try:
+ if self.pipe is not None:
+ return self.pipe.fileno()
+ # create the pipe and feed in any existing data
+ self.pipe = pipe.make_pipe()
+ if len(self.in_buffer) > 0:
+ self.pipe.set()
+ return self.pipe.fileno()
+ finally:
+ self.lock.release()
+
+ def shutdown(self, how):
+ """
+ Shut down one or both halves of the connection. If C{how} is 0,
+ further receives are disallowed. If C{how} is 1, further sends
+ are disallowed. If C{how} is 2, further sends and receives are
+ disallowed. This closes the stream in one or both directions.
+
+ @param how: 0 (stop receiving), 1 (stop sending), or 2 (stop
+ receiving and sending).
+ @type how: int
+ """
+ if (how == 0) or (how == 2):
+ # feign "read" shutdown
+ self.eof_received = 1
+ if (how == 1) or (how == 2):
+ self.lock.acquire()
+ try:
+ m = self._send_eof()
+ finally:
+ self.lock.release()
+            if m is not None:
+                self.transport._send_user_message(m)
+
+ def shutdown_read(self):
+ """
+ Shutdown the receiving side of this socket, closing the stream in
+ the incoming direction. After this call, future reads on this
+ channel will fail instantly. This is a convenience method, equivalent
+ to C{shutdown(0)}, for people who don't make it a habit to
+ memorize unix constants from the 1970s.
+
+ @since: 1.2
+ """
+ self.shutdown(0)
+
+ def shutdown_write(self):
+ """
+ Shutdown the sending side of this socket, closing the stream in
+ the outgoing direction. After this call, future writes on this
+ channel will fail instantly. This is a convenience method, equivalent
+ to C{shutdown(1)}, for people who don't make it a habit to
+ memorize unix constants from the 1970s.
+
+ @since: 1.2
+ """
+ self.shutdown(1)
+
+
+ ### calls from Transport
+
+
+ def _set_transport(self, transport):
+ self.transport = transport
+ self.logger = util.get_logger(self.transport.get_log_channel() + '.' + self.name)
+
+ def _set_window(self, window_size, max_packet_size):
+ self.in_window_size = window_size
+ self.in_max_packet_size = max_packet_size
+ # threshold of bytes we receive before we bother to send a window update
+ self.in_window_threshold = window_size // 10
+ self.in_window_sofar = 0
+ self._log(DEBUG, 'Max packet in: %d bytes' % max_packet_size)
+
+ def _set_remote_channel(self, chanid, window_size, max_packet_size):
+ self.remote_chanid = chanid
+ self.out_window_size = window_size
+ self.out_max_packet_size = max(max_packet_size, self.MIN_PACKET_SIZE)
+ self.active = 1
+ self._log(DEBUG, 'Max packet out: %d bytes' % max_packet_size)
+
+ def _request_success(self, m):
+ self._log(DEBUG, 'Sesch channel %d request ok' % self.chanid)
+ self.event.set()
+ return
+
+ def _request_failed(self, m):
+ self.lock.acquire()
+ try:
+ msgs = self._close_internal()
+ finally:
+ self.lock.release()
+ for m in msgs:
+ if m is not None:
+ self.transport._send_user_message(m)
+
+ def _feed(self, m):
+ if type(m) is str:
+ # passed from _feed_extended
+ s = m
+ else:
+ s = m.get_string()
+ self.lock.acquire()
+ try:
+ if self.ultra_debug:
+ self._log(DEBUG, 'fed %d bytes' % len(s))
+ if self.pipe is not None:
+ self.pipe.set()
+ self.in_buffer += s
+ self.in_buffer_cv.notifyAll()
+ finally:
+ self.lock.release()
+
+ def _feed_extended(self, m):
+ code = m.get_int()
+ s = m.get_string()
+ if code != 1:
+ self._log(ERROR, 'unknown extended_data type %d; discarding' % code)
+ return
+ if self.combine_stderr:
+ return self._feed(s)
+ self.lock.acquire()
+ try:
+ if self.ultra_debug:
+ self._log(DEBUG, 'fed %d stderr bytes' % len(s))
+ self.in_stderr_buffer += s
+ self.in_stderr_buffer_cv.notifyAll()
+ finally:
+ self.lock.release()
+
+ def _window_adjust(self, m):
+ nbytes = m.get_int()
+ self.lock.acquire()
+ try:
+ if self.ultra_debug:
+ self._log(DEBUG, 'window up %d' % nbytes)
+ self.out_window_size += nbytes
+ self.out_buffer_cv.notifyAll()
+ finally:
+ self.lock.release()
+
+ def _handle_request(self, m):
+ key = m.get_string()
+ want_reply = m.get_boolean()
+ server = self.transport.server_object
+ ok = False
+ if key == 'exit-status':
+ self.exit_status = m.get_int()
+ self.status_event.set()
+ ok = True
+ elif key == 'xon-xoff':
+ # ignore
+ ok = True
+ elif key == 'pty-req':
+ term = m.get_string()
+ width = m.get_int()
+ height = m.get_int()
+ pixelwidth = m.get_int()
+ pixelheight = m.get_int()
+ modes = m.get_string()
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_pty_request(self, term, width, height, pixelwidth,
+ pixelheight, modes)
+ elif key == 'shell':
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_shell_request(self)
+ elif key == 'exec':
+ cmd = m.get_string()
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_exec_request(self, cmd)
+ elif key == 'subsystem':
+ name = m.get_string()
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_subsystem_request(self, name)
+ elif key == 'window-change':
+ width = m.get_int()
+ height = m.get_int()
+ pixelwidth = m.get_int()
+ pixelheight = m.get_int()
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_window_change_request(self, width, height, pixelwidth,
+ pixelheight)
+ else:
+ self._log(DEBUG, 'Unhandled channel request "%s"' % key)
+ ok = False
+ if want_reply:
+ m = Message()
+ if ok:
+ m.add_byte(chr(MSG_CHANNEL_SUCCESS))
+ else:
+ m.add_byte(chr(MSG_CHANNEL_FAILURE))
+ m.add_int(self.remote_chanid)
+ self.transport._send_user_message(m)
+
+ def _handle_eof(self, m):
+ self.lock.acquire()
+ try:
+ if not self.eof_received:
+ self.eof_received = True
+ self.in_buffer_cv.notifyAll()
+ self.in_stderr_buffer_cv.notifyAll()
+ if self.pipe is not None:
+ self.pipe.set_forever()
+ finally:
+ self.lock.release()
+ self._log(DEBUG, 'EOF received')
+
+ def _handle_close(self, m):
+ self.lock.acquire()
+ try:
+ msgs = self._close_internal()
+ self.transport._unlink_channel(self.chanid)
+ finally:
+ self.lock.release()
+ for m in msgs:
+ if m is not None:
+ self.transport._send_user_message(m)
+
+
+ ### internals...
+
+
+ def _log(self, level, msg):
+ self.logger.log(level, msg)
+
+ def _set_closed(self):
+ # you are holding the lock.
+ self.closed = True
+ self.in_buffer_cv.notifyAll()
+ self.in_stderr_buffer_cv.notifyAll()
+ self.out_buffer_cv.notifyAll()
+ if self.pipe is not None:
+ self.pipe.set_forever()
+
+ def _send_eof(self):
+ # you are holding the lock.
+ if self.eof_sent:
+ return None
+ m = Message()
+ m.add_byte(chr(MSG_CHANNEL_EOF))
+ m.add_int(self.remote_chanid)
+ self.eof_sent = True
+ self._log(DEBUG, 'EOF sent')
+ return m
+
+ def _close_internal(self):
+ # you are holding the lock.
+ if not self.active or self.closed:
+ return None, None
+ m1 = self._send_eof()
+ m2 = Message()
+ m2.add_byte(chr(MSG_CHANNEL_CLOSE))
+ m2.add_int(self.remote_chanid)
+ self._set_closed()
+ # can't unlink from the Transport yet -- the remote side may still
+ # try to send meta-data (exit-status, etc)
+ return m1, m2
+
+ def _unlink(self):
+ # server connection could die before we become active: still signal the close!
+ if self.closed:
+ return
+ self.lock.acquire()
+ try:
+ self._set_closed()
+ self.transport._unlink_channel(self.chanid)
+ finally:
+ self.lock.release()
+
+ def _check_add_window(self, n):
+ # already holding the lock!
+ if self.closed or self.eof_received or not self.active:
+ return 0
+ if self.ultra_debug:
+ self._log(DEBUG, 'addwindow %d' % n)
+ self.in_window_sofar += n
+ if self.in_window_sofar <= self.in_window_threshold:
+ return 0
+ if self.ultra_debug:
+ self._log(DEBUG, 'addwindow send %d' % self.in_window_sofar)
+ out = self.in_window_sofar
+ self.in_window_sofar = 0
+ return out
+
+ def _wait_for_send_window(self, size):
+ """
+ (You are already holding the lock.)
+ Wait for the send window to open up, and allocate up to C{size} bytes
+ for transmission. If no space opens up before the timeout, a timeout
+ exception is raised. Returns the number of bytes available to send
+ (may be less than requested).
+ """
+ # you are already holding the lock
+ if self.closed or self.eof_sent:
+ return 0
+ if self.out_window_size == 0:
+ # should we block?
+ if self.timeout == 0.0:
+ raise socket.timeout()
+ # loop here in case we get woken up but a different thread has filled the buffer
+ timeout = self.timeout
+ while self.out_window_size == 0:
+ if self.closed or self.eof_sent:
+ return 0
+ then = time.time()
+ self.out_buffer_cv.wait(timeout)
+ if timeout != None:
+ timeout -= time.time() - then
+ if timeout <= 0.0:
+ raise socket.timeout()
+ # we have some window to squeeze into
+ if self.closed or self.eof_sent:
+ return 0
+ if self.out_window_size < size:
+ size = self.out_window_size
+ if self.out_max_packet_size - 64 < size:
+ size = self.out_max_packet_size - 64
+ self.out_window_size -= size
+ if self.ultra_debug:
+ self._log(DEBUG, 'window down to %d' % self.out_window_size)
+ return size
+
+
+class ChannelFile (BufferedFile):
+ """
+ A file-like wrapper around L{Channel}. A ChannelFile is created by calling
+ L{Channel.makefile}.
+
+ @bug: To correctly emulate the file object created from a socket's
+ C{makefile} method, a L{Channel} and its C{ChannelFile} should be able
+ to be closed or garbage-collected independently. Currently, closing
+ the C{ChannelFile} does nothing but flush the buffer.
+ """
+
+ def __init__(self, channel, mode = 'r', bufsize = -1):
+ self.channel = channel
+ BufferedFile.__init__(self)
+ self._set_mode(mode, bufsize)
+
+ def __repr__(self):
+ """
+ Returns a string representation of this object, for debugging.
+
+ @rtype: str
+ """
+ return '<paramiko.ChannelFile from ' + repr(self.channel) + '>'
+
+ def _read(self, size):
+ return self.channel.recv(size)
+
+ def _write(self, data):
+ self.channel.sendall(data)
+ return len(data)
+
+ seek = BufferedFile.seek
+
+
+class ChannelStderrFile (ChannelFile):
+ def __init__(self, channel, mode = 'r', bufsize = -1):
+ ChannelFile.__init__(self, channel, mode, bufsize)
+
+ def _read(self, size):
+ return self.channel.recv_stderr(size)
+
+ def _write(self, data):
+ self.channel.sendall_stderr(data)
+ return len(data)
+
+
+# vim: set shiftwidth=4 expandtab :
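
The ChannelFile wrapper above is normally obtained through Channel.makefile(). A minimal client-side sketch (not part of the upstream source; the host name and credentials are placeholders, and Transport.connect, open_session and exec_command are assumed from the rest of this package):

import socket

import paramiko

sock = socket.socket()
sock.connect(('ssh.example.com', 22))            # placeholder host
t = paramiko.Transport(sock)
t.connect(username='user', password='secret')    # placeholder credentials
chan = t.open_session()
chan.exec_command('uname -a')
stdout = chan.makefile('r')                      # a ChannelFile over the channel's stdout
for line in stdout:                              # BufferedFile supports iteration
    print line,
chan.close()
t.close()
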
diff --git a/paramiko/common.py b/paramiko/common.py
new file mode 100644
index 0000000..c5999e6
--- /dev/null
+++ b/paramiko/common.py
@@ -0,0 +1,136 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Common constants and global variables.
+"""
+
+MSG_DISCONNECT, MSG_IGNORE, MSG_UNIMPLEMENTED, MSG_DEBUG, MSG_SERVICE_REQUEST, \
+ MSG_SERVICE_ACCEPT = range(1, 7)
+MSG_KEXINIT, MSG_NEWKEYS = range(20, 22)
+MSG_USERAUTH_REQUEST, MSG_USERAUTH_FAILURE, MSG_USERAUTH_SUCCESS, \
+ MSG_USERAUTH_BANNER = range(50, 54)
+MSG_USERAUTH_PK_OK = 60
+MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE = range(60, 62)
+MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE = range(80, 83)
+MSG_CHANNEL_OPEN, MSG_CHANNEL_OPEN_SUCCESS, MSG_CHANNEL_OPEN_FAILURE, \
+ MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_DATA, MSG_CHANNEL_EXTENDED_DATA, \
+ MSG_CHANNEL_EOF, MSG_CHANNEL_CLOSE, MSG_CHANNEL_REQUEST, \
+ MSG_CHANNEL_SUCCESS, MSG_CHANNEL_FAILURE = range(90, 101)
+
+
+# for debugging:
+MSG_NAMES = {
+ MSG_DISCONNECT: 'disconnect',
+ MSG_IGNORE: 'ignore',
+ MSG_UNIMPLEMENTED: 'unimplemented',
+ MSG_DEBUG: 'debug',
+ MSG_SERVICE_REQUEST: 'service-request',
+ MSG_SERVICE_ACCEPT: 'service-accept',
+ MSG_KEXINIT: 'kexinit',
+ MSG_NEWKEYS: 'newkeys',
+ 30: 'kex30',
+ 31: 'kex31',
+ 32: 'kex32',
+ 33: 'kex33',
+ 34: 'kex34',
+ MSG_USERAUTH_REQUEST: 'userauth-request',
+ MSG_USERAUTH_FAILURE: 'userauth-failure',
+ MSG_USERAUTH_SUCCESS: 'userauth-success',
+    MSG_USERAUTH_BANNER: 'userauth-banner',
+ MSG_USERAUTH_PK_OK: 'userauth-60(pk-ok/info-request)',
+ MSG_USERAUTH_INFO_RESPONSE: 'userauth-info-response',
+ MSG_GLOBAL_REQUEST: 'global-request',
+ MSG_REQUEST_SUCCESS: 'request-success',
+ MSG_REQUEST_FAILURE: 'request-failure',
+ MSG_CHANNEL_OPEN: 'channel-open',
+ MSG_CHANNEL_OPEN_SUCCESS: 'channel-open-success',
+ MSG_CHANNEL_OPEN_FAILURE: 'channel-open-failure',
+ MSG_CHANNEL_WINDOW_ADJUST: 'channel-window-adjust',
+ MSG_CHANNEL_DATA: 'channel-data',
+ MSG_CHANNEL_EXTENDED_DATA: 'channel-extended-data',
+ MSG_CHANNEL_EOF: 'channel-eof',
+ MSG_CHANNEL_CLOSE: 'channel-close',
+ MSG_CHANNEL_REQUEST: 'channel-request',
+ MSG_CHANNEL_SUCCESS: 'channel-success',
+ MSG_CHANNEL_FAILURE: 'channel-failure'
+ }
+
+
+# authentication request return codes:
+AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED = range(3)
+
+
+# channel request failed reasons:
+(OPEN_SUCCEEDED,
+ OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
+ OPEN_FAILED_CONNECT_FAILED,
+ OPEN_FAILED_UNKNOWN_CHANNEL_TYPE,
+ OPEN_FAILED_RESOURCE_SHORTAGE) = range(0, 5)
+
+
+CONNECTION_FAILED_CODE = {
+ 1: 'Administratively prohibited',
+ 2: 'Connect failed',
+ 3: 'Unknown channel type',
+ 4: 'Resource shortage'
+}
+
+
+DISCONNECT_SERVICE_NOT_AVAILABLE, DISCONNECT_AUTH_CANCELLED_BY_USER, \
+ DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE = 7, 13, 14
+
+
+import os
+
+from Crypto.Util.randpool import PersistentRandomPool, RandomPool
+
+# keep a crypto-strong PRNG nearby
+try:
+    randpool = PersistentRandomPool(os.path.join(os.path.expanduser('~'), '.randpool'))
+except:
+ # the above will likely fail on Windows - fall back to non-persistent random pool
+ randpool = RandomPool()
+
+try:
+ randpool.randomize()
+except:
+ # earlier versions of pyCrypto (pre-2.0) don't have randomize()
+ pass
+
+import sys
+if sys.version_info < (2, 3):
+ try:
+ import logging
+ except:
+ import logging22 as logging
+ import select
+ PY22 = True
+
+ import socket
+ if not hasattr(socket, 'timeout'):
+ class timeout(socket.error): pass
+ socket.timeout = timeout
+ del timeout
+else:
+ import logging
+ PY22 = False
+
+DEBUG = logging.DEBUG
+INFO = logging.INFO
+WARNING = logging.WARNING
+ERROR = logging.ERROR
+CRITICAL = logging.CRITICAL
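
A quick sanity check of the constants and the shared random pool defined above (illustrative only):

from paramiko.common import MSG_CHANNEL_OPEN, MSG_NAMES, randpool

print MSG_CHANNEL_OPEN                  # 90
print MSG_NAMES[MSG_CHANNEL_OPEN]       # 'channel-open'
print len(randpool.get_bytes(16))       # 16 crypto-strong random bytes
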
diff --git a/paramiko/compress.py b/paramiko/compress.py
new file mode 100644
index 0000000..bdf4b42
--- /dev/null
+++ b/paramiko/compress.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Compression implementations for a Transport.
+"""
+
+import zlib
+
+
+class ZlibCompressor (object):
+ def __init__(self):
+ self.z = zlib.compressobj(9)
+
+ def __call__(self, data):
+ return self.z.compress(data) + self.z.flush(zlib.Z_FULL_FLUSH)
+
+
+class ZlibDecompressor (object):
+ def __init__(self):
+ self.z = zlib.decompressobj()
+
+ def __call__(self, data):
+ return self.z.decompress(data)
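
Because the compressor flushes with Z_FULL_FLUSH on every call, each chunk it returns can be fed straight into the decompressor. A round-trip sketch:

from paramiko.compress import ZlibCompressor, ZlibDecompressor

comp = ZlibCompressor()
decomp = ZlibDecompressor()
payload = 'ssh packet payload ' * 50
wire = comp(payload)                    # compressed and fully flushed
assert decomp(wire) == payload          # decompresses back to the original bytes
assert len(wire) < len(payload)         # repetitive data shrinks considerably
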
diff --git a/paramiko/dsskey.py b/paramiko/dsskey.py
new file mode 100644
index 0000000..2b31372
--- /dev/null
+++ b/paramiko/dsskey.py
@@ -0,0 +1,176 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+L{DSSKey}
+"""
+
+from Crypto.PublicKey import DSA
+from Crypto.Hash import SHA
+
+from paramiko.common import *
+from paramiko import util
+from paramiko.ssh_exception import SSHException
+from paramiko.message import Message
+from paramiko.ber import BER, BERException
+from paramiko.pkey import PKey
+
+
+class DSSKey (PKey):
+ """
+    Representation of a DSS key which can be used to sign and verify SSH2
+ data.
+ """
+
+ def __init__(self, msg=None, data=None, filename=None, password=None, vals=None):
+ if filename is not None:
+ self._from_private_key_file(filename, password)
+ return
+ if (msg is None) and (data is not None):
+ msg = Message(data)
+ if vals is not None:
+ self.p, self.q, self.g, self.y = vals
+ else:
+ if msg is None:
+ raise SSHException('Key object may not be empty')
+ if msg.get_string() != 'ssh-dss':
+ raise SSHException('Invalid key')
+ self.p = msg.get_mpint()
+ self.q = msg.get_mpint()
+ self.g = msg.get_mpint()
+ self.y = msg.get_mpint()
+ self.size = util.bit_length(self.p)
+
+ def __str__(self):
+ m = Message()
+ m.add_string('ssh-dss')
+ m.add_mpint(self.p)
+ m.add_mpint(self.q)
+ m.add_mpint(self.g)
+ m.add_mpint(self.y)
+ return str(m)
+
+ def __hash__(self):
+ h = hash(self.get_name())
+ h = h * 37 + hash(self.p)
+ h = h * 37 + hash(self.q)
+ h = h * 37 + hash(self.g)
+ h = h * 37 + hash(self.y)
+ # h might be a long by now...
+ return hash(h)
+
+ def get_name(self):
+ return 'ssh-dss'
+
+ def get_bits(self):
+ return self.size
+
+ def can_sign(self):
+ return hasattr(self, 'x')
+
+ def sign_ssh_data(self, rpool, data):
+ digest = SHA.new(data).digest()
+ dss = DSA.construct((long(self.y), long(self.g), long(self.p), long(self.q), long(self.x)))
+ # generate a suitable k
+ qsize = len(util.deflate_long(self.q, 0))
+ while True:
+ k = util.inflate_long(rpool.get_bytes(qsize), 1)
+ if (k > 2) and (k < self.q):
+ break
+ r, s = dss.sign(util.inflate_long(digest, 1), k)
+ m = Message()
+ m.add_string('ssh-dss')
+ # apparently, in rare cases, r or s may be shorter than 20 bytes!
+ rstr = util.deflate_long(r, 0)
+ sstr = util.deflate_long(s, 0)
+ if len(rstr) < 20:
+ rstr = '\x00' * (20 - len(rstr)) + rstr
+ if len(sstr) < 20:
+ sstr = '\x00' * (20 - len(sstr)) + sstr
+ m.add_string(rstr + sstr)
+ return m
+
+ def verify_ssh_sig(self, data, msg):
+ if len(str(msg)) == 40:
+ # spies.com bug: signature has no header
+ sig = str(msg)
+ else:
+ kind = msg.get_string()
+ if kind != 'ssh-dss':
+ return 0
+ sig = msg.get_string()
+
+ # pull out (r, s) which are NOT encoded as mpints
+ sigR = util.inflate_long(sig[:20], 1)
+ sigS = util.inflate_long(sig[20:], 1)
+ sigM = util.inflate_long(SHA.new(data).digest(), 1)
+
+ dss = DSA.construct((long(self.y), long(self.g), long(self.p), long(self.q)))
+ return dss.verify(sigM, (sigR, sigS))
+
+ def write_private_key_file(self, filename, password=None):
+ keylist = [ 0, self.p, self.q, self.g, self.y, self.x ]
+ try:
+ b = BER()
+ b.encode(keylist)
+ except BERException:
+ raise SSHException('Unable to create ber encoding of key')
+ self._write_private_key_file('DSA', filename, str(b), password)
+
+ def generate(bits=1024, progress_func=None):
+ """
+ Generate a new private DSS key. This factory function can be used to
+ generate a new host key or authentication key.
+
+ @param bits: number of bits the generated key should be.
+ @type bits: int
+ @param progress_func: an optional function to call at key points in
+ key generation (used by C{pyCrypto.PublicKey}).
+ @type progress_func: function
+ @return: new private key
+ @rtype: L{DSSKey}
+
+ @since: fearow
+ """
+ randpool.stir()
+ dsa = DSA.generate(bits, randpool.get_bytes, progress_func)
+ key = DSSKey(vals=(dsa.p, dsa.q, dsa.g, dsa.y))
+ key.x = dsa.x
+ return key
+ generate = staticmethod(generate)
+
+
+ ### internals...
+
+
+ def _from_private_key_file(self, filename, password):
+ # private key file contains:
+ # DSAPrivateKey = { version = 0, p, q, g, y, x }
+ data = self._read_private_key_file('DSA', filename, password)
+ try:
+ keylist = BER(data).decode()
+ except BERException, x:
+ raise SSHException('Unable to parse key file: ' + str(x))
+ if (type(keylist) is not list) or (len(keylist) < 6) or (keylist[0] != 0):
+ raise SSHException('not a valid DSA private key file (bad ber encoding)')
+ self.p = keylist[1]
+ self.q = keylist[2]
+ self.g = keylist[3]
+ self.y = keylist[4]
+ self.x = keylist[5]
+ self.size = util.bit_length(self.p)
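
An illustrative round trip with the class above: generate a key, sign some data, and verify the signature. Key generation is slow, and the Message returned by sign_ssh_data must be rewound before it can be parsed again.

from paramiko.dsskey import DSSKey
from paramiko.common import randpool

key = DSSKey.generate(bits=1024)           # takes a few seconds
data = 'sign me'
sig = key.sign_ssh_data(randpool, data)    # returns a Message
sig.rewind()                               # reset the read pointer before parsing
assert key.verify_ssh_sig(data, sig)
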
diff --git a/paramiko/file.py b/paramiko/file.py
new file mode 100644
index 0000000..c29e7c4
--- /dev/null
+++ b/paramiko/file.py
@@ -0,0 +1,440 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+BufferedFile.
+"""
+
+from cStringIO import StringIO
+
+
+_FLAG_READ = 0x1
+_FLAG_WRITE = 0x2
+_FLAG_APPEND = 0x4
+_FLAG_BINARY = 0x10
+_FLAG_BUFFERED = 0x20
+_FLAG_LINE_BUFFERED = 0x40
+_FLAG_UNIVERSAL_NEWLINE = 0x80
+
+
+class BufferedFile (object):
+ """
+ Reusable base class to implement python-style file buffering around a
+ simpler stream.
+ """
+
+ _DEFAULT_BUFSIZE = 8192
+
+ SEEK_SET = 0
+ SEEK_CUR = 1
+ SEEK_END = 2
+
+ def __init__(self):
+ self._flags = 0
+ self._bufsize = self._DEFAULT_BUFSIZE
+ self._wbuffer = StringIO()
+ self._rbuffer = ''
+ self._at_trailing_cr = False
+ self._closed = False
+ # pos - position within the file, according to the user
+ # realpos - position according the OS
+ # (these may be different because we buffer for line reading)
+ self._pos = self._realpos = 0
+
+ def __del__(self):
+ self.close()
+
+ def __iter__(self):
+ """
+ Returns an iterator that can be used to iterate over the lines in this
+ file. This iterator happens to return the file itself, since a file is
+ its own iterator.
+
+ @raise ValueError: if the file is closed.
+
+        @return: an iterator.
+ @rtype: iterator
+ """
+ if self._closed:
+ raise ValueError('I/O operation on closed file')
+ return self
+
+ def close(self):
+ """
+ Close the file. Future read and write operations will fail.
+ """
+ self.flush()
+ self._closed = True
+
+ def flush(self):
+ """
+ Write out any data in the write buffer. This may do nothing if write
+ buffering is not turned on.
+ """
+ self._write_all(self._wbuffer.getvalue())
+ self._wbuffer = StringIO()
+ return
+
+ def next(self):
+ """
+ Returns the next line from the input, or raises L{StopIteration} when
+ EOF is hit. Unlike python file objects, it's okay to mix calls to
+ C{next} and L{readline}.
+
+ @raise StopIteration: when the end of the file is reached.
+
+ @return: a line read from the file.
+ @rtype: str
+ """
+ line = self.readline()
+ if not line:
+ raise StopIteration
+ return line
+
+ def read(self, size=None):
+ """
+ Read at most C{size} bytes from the file (less if we hit the end of the
+ file first). If the C{size} argument is negative or omitted, read all
+ the remaining data in the file.
+
+ @param size: maximum number of bytes to read.
+ @type size: int
+ @return: data read from the file, or an empty string if EOF was
+ encountered immediately.
+ @rtype: str
+ """
+ if self._closed:
+ raise IOError('File is closed')
+ if not (self._flags & _FLAG_READ):
+ raise IOError('File not open for reading')
+ if (size is None) or (size < 0):
+ # go for broke
+ result = self._rbuffer
+ self._rbuffer = ''
+ self._pos += len(result)
+ while True:
+ try:
+ new_data = self._read(self._DEFAULT_BUFSIZE)
+ except EOFError:
+ new_data = None
+ if (new_data is None) or (len(new_data) == 0):
+ break
+ result += new_data
+ self._realpos += len(new_data)
+ self._pos += len(new_data)
+ return result
+ if size <= len(self._rbuffer):
+ result = self._rbuffer[:size]
+ self._rbuffer = self._rbuffer[size:]
+ self._pos += len(result)
+ return result
+ while len(self._rbuffer) < size:
+ try:
+ new_data = self._read(max(self._bufsize, size - len(self._rbuffer)))
+ except EOFError:
+ new_data = None
+ if (new_data is None) or (len(new_data) == 0):
+ break
+ self._rbuffer += new_data
+ self._realpos += len(new_data)
+ result = self._rbuffer[:size]
+ self._rbuffer = self._rbuffer[size:]
+ self._pos += len(result)
+ return result
+
+ def readline(self, size=None):
+ """
+ Read one entire line from the file. A trailing newline character is
+ kept in the string (but may be absent when a file ends with an
+ incomplete line). If the size argument is present and non-negative, it
+ is a maximum byte count (including the trailing newline) and an
+ incomplete line may be returned. An empty string is returned only when
+ EOF is encountered immediately.
+
+ @note: Unlike stdio's C{fgets()}, the returned string contains null
+ characters (C{'\\0'}) if they occurred in the input.
+
+ @param size: maximum length of returned string.
+ @type size: int
+ @return: next line of the file, or an empty string if the end of the
+ file has been reached.
+ @rtype: str
+ """
+ # it's almost silly how complex this function is.
+ if self._closed:
+ raise IOError('File is closed')
+ if not (self._flags & _FLAG_READ):
+ raise IOError('File not open for reading')
+ line = self._rbuffer
+ while True:
+ if self._at_trailing_cr and (self._flags & _FLAG_UNIVERSAL_NEWLINE) and (len(line) > 0):
+ # edge case: the newline may be '\r\n' and we may have read
+ # only the first '\r' last time.
+ if line[0] == '\n':
+ line = line[1:]
+ self._record_newline('\r\n')
+ else:
+ self._record_newline('\r')
+ self._at_trailing_cr = False
+ # check size before looking for a linefeed, in case we already have
+ # enough.
+ if (size is not None) and (size >= 0):
+ if len(line) >= size:
+ # truncate line and return
+ self._rbuffer = line[size:]
+ line = line[:size]
+ self._pos += len(line)
+ return line
+ n = size - len(line)
+ else:
+ n = self._DEFAULT_BUFSIZE
+ if ('\n' in line) or ((self._flags & _FLAG_UNIVERSAL_NEWLINE) and ('\r' in line)):
+ break
+ try:
+ new_data = self._read(n)
+ except EOFError:
+ new_data = None
+ if (new_data is None) or (len(new_data) == 0):
+ self._rbuffer = ''
+ self._pos += len(line)
+ return line
+ line += new_data
+ self._realpos += len(new_data)
+ # find the newline
+ pos = line.find('\n')
+ if self._flags & _FLAG_UNIVERSAL_NEWLINE:
+ rpos = line.find('\r')
+ if (rpos >= 0) and ((rpos < pos) or (pos < 0)):
+ pos = rpos
+ xpos = pos + 1
+ if (line[pos] == '\r') and (xpos < len(line)) and (line[xpos] == '\n'):
+ xpos += 1
+ self._rbuffer = line[xpos:]
+ lf = line[pos:xpos]
+ line = line[:pos] + '\n'
+ if (len(self._rbuffer) == 0) and (lf == '\r'):
+ # we could read the line up to a '\r' and there could still be a
+ # '\n' following that we read next time. note that and eat it.
+ self._at_trailing_cr = True
+ else:
+ self._record_newline(lf)
+ self._pos += len(line)
+ return line
+
+ def readlines(self, sizehint=None):
+ """
+ Read all remaining lines using L{readline} and return them as a list.
+ If the optional C{sizehint} argument is present, instead of reading up
+ to EOF, whole lines totalling approximately sizehint bytes (possibly
+ after rounding up to an internal buffer size) are read.
+
+ @param sizehint: desired maximum number of bytes to read.
+ @type sizehint: int
+ @return: list of lines read from the file.
+ @rtype: list
+ """
+ lines = []
+ bytes = 0
+ while 1:
+ line = self.readline()
+ if len(line) == 0:
+ break
+ lines.append(line)
+ bytes += len(line)
+ if (sizehint is not None) and (bytes >= sizehint):
+ break
+ return lines
+
+ def seek(self, offset, whence=0):
+ """
+ Set the file's current position, like stdio's C{fseek}. Not all file
+ objects support seeking.
+
+ @note: If a file is opened in append mode (C{'a'} or C{'a+'}), any seek
+ operations will be undone at the next write (as the file position
+ will move back to the end of the file).
+
+ @param offset: position to move to within the file, relative to
+ C{whence}.
+ @type offset: int
+ @param whence: type of movement: 0 = absolute; 1 = relative to the
+ current position; 2 = relative to the end of the file.
+ @type whence: int
+
+ @raise IOError: if the file doesn't support random access.
+ """
+ raise IOError('File does not support seeking.')
+
+ def tell(self):
+ """
+ Return the file's current position. This may not be accurate or
+ useful if the underlying file doesn't support random access, or was
+ opened in append mode.
+
+ @return: file position (in bytes).
+ @rtype: int
+ """
+ return self._pos
+
+ def write(self, data):
+ """
+ Write data to the file. If write buffering is on (C{bufsize} was
+ specified and non-zero), some or all of the data may not actually be
+ written yet. (Use L{flush} or L{close} to force buffered data to be
+ written out.)
+
+ @param data: data to write.
+ @type data: str
+ """
+ if self._closed:
+ raise IOError('File is closed')
+ if not (self._flags & _FLAG_WRITE):
+ raise IOError('File not open for writing')
+ if not (self._flags & _FLAG_BUFFERED):
+ self._write_all(data)
+ return
+ self._wbuffer.write(data)
+ if self._flags & _FLAG_LINE_BUFFERED:
+ # only scan the new data for linefeed, to avoid wasting time.
+ last_newline_pos = data.rfind('\n')
+ if last_newline_pos >= 0:
+ wbuf = self._wbuffer.getvalue()
+ last_newline_pos += len(wbuf) - len(data)
+ self._write_all(wbuf[:last_newline_pos + 1])
+ self._wbuffer = StringIO()
+ self._wbuffer.write(wbuf[last_newline_pos + 1:])
+ return
+ # even if we're line buffering, if the buffer has grown past the
+ # buffer size, force a flush.
+ if self._wbuffer.tell() >= self._bufsize:
+ self.flush()
+ return
+
+ def writelines(self, sequence):
+ """
+ Write a sequence of strings to the file. The sequence can be any
+ iterable object producing strings, typically a list of strings. (The
+ name is intended to match L{readlines}; C{writelines} does not add line
+ separators.)
+
+ @param sequence: an iterable sequence of strings.
+ @type sequence: sequence
+ """
+ for line in sequence:
+ self.write(line)
+ return
+
+ def xreadlines(self):
+ """
+ Identical to C{iter(f)}. This is a deprecated file interface that
+ predates python iterator support.
+
+ @return: an iterator.
+ @rtype: iterator
+ """
+ return self
+
+
+ ### overrides...
+
+
+ def _read(self, size):
+ """
+ I{(subclass override)}
+ Read data from the stream. Return C{None} or raise C{EOFError} to
+ indicate EOF.
+ """
+ raise EOFError()
+
+ def _write(self, data):
+ """
+ I{(subclass override)}
+ Write data into the stream.
+ """
+ raise IOError('write not implemented')
+
+ def _get_size(self):
+ """
+ I{(subclass override)}
+ Return the size of the file. This is called from within L{_set_mode}
+ if the file is opened in append mode, so the file position can be
+ tracked and L{seek} and L{tell} will work correctly. If the file is
+ a stream that can't be randomly accessed, you don't need to override
+        this method.
+ """
+ return 0
+
+
+ ### internals...
+
+
+ def _set_mode(self, mode='r', bufsize=-1):
+ """
+ Subclasses call this method to initialize the BufferedFile.
+ """
+ if bufsize == 1:
+ # apparently, line buffering only affects writes. reads are only
+ # buffered if you call readline (directly or indirectly: iterating
+ # over a file will indirectly call readline).
+ self._flags |= _FLAG_BUFFERED | _FLAG_LINE_BUFFERED
+ elif bufsize > 1:
+ self._bufsize = bufsize
+ self._flags |= _FLAG_BUFFERED
+ if ('r' in mode) or ('+' in mode):
+ self._flags |= _FLAG_READ
+ if ('w' in mode) or ('+' in mode):
+ self._flags |= _FLAG_WRITE
+ if ('a' in mode):
+ self._flags |= _FLAG_WRITE | _FLAG_APPEND
+ self._size = self._get_size()
+ self._pos = self._realpos = self._size
+ if ('b' in mode):
+ self._flags |= _FLAG_BINARY
+ if ('U' in mode):
+ self._flags |= _FLAG_UNIVERSAL_NEWLINE
+ # built-in file objects have this attribute to store which kinds of
+ # line terminations they've seen:
+ # <http://www.python.org/doc/current/lib/built-in-funcs.html>
+ self.newlines = None
+
+ def _write_all(self, data):
+ # the underlying stream may be something that does partial writes (like
+ # a socket).
+ while len(data) > 0:
+ count = self._write(data)
+ data = data[count:]
+ if self._flags & _FLAG_APPEND:
+ self._size += count
+ self._pos = self._realpos = self._size
+ else:
+ self._pos += count
+ self._realpos += count
+ return None
+
+ def _record_newline(self, newline):
+ # silliness about tracking what kinds of newlines we've seen.
+ # i don't understand why it can be None, a string, or a tuple, instead
+ # of just always being a tuple, but we'll emulate that behavior anyway.
+ if not (self._flags & _FLAG_UNIVERSAL_NEWLINE):
+ return
+ if self.newlines is None:
+ self.newlines = newline
+ elif (type(self.newlines) is str) and (self.newlines != newline):
+ self.newlines = (self.newlines, newline)
+ elif newline not in self.newlines:
+ self.newlines += (newline,)
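
A subclass of BufferedFile only needs to supply _read (and optionally _write); read, readline and iteration all come from the base class. A tiny in-memory subclass as a sketch:

from paramiko.file import BufferedFile

class StringBackedFile (BufferedFile):
    """Read-only BufferedFile over an in-memory string (illustrative)."""
    def __init__(self, data):
        BufferedFile.__init__(self)
        self._data = data
        self._set_mode('r')

    def _read(self, size):
        if not self._data:
            return None                    # None signals EOF to the base class
        chunk = self._data[:size]
        self._data = self._data[size:]
        return chunk

f = StringBackedFile('first line\nsecond line\n')
print repr(f.readline())                   # 'first line\n'
print repr(f.read())                       # 'second line\n'
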
diff --git a/paramiko/kex_gex.py b/paramiko/kex_gex.py
new file mode 100644
index 0000000..994d76c
--- /dev/null
+++ b/paramiko/kex_gex.py
@@ -0,0 +1,202 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Variant on L{KexGroup1 <paramiko.kex_group1.KexGroup1>} where the prime "p" and
+generator "g" are provided by the server. A bit more work is required on the
+client side, and a B{lot} more on the server side.
+"""
+
+from Crypto.Hash import SHA
+from Crypto.Util import number
+
+from paramiko.common import *
+from paramiko import util
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+
+
+_MSG_KEXDH_GEX_GROUP, _MSG_KEXDH_GEX_INIT, _MSG_KEXDH_GEX_REPLY, _MSG_KEXDH_GEX_REQUEST = range(31, 35)
+
+
+class KexGex (object):
+
+ name = 'diffie-hellman-group-exchange-sha1'
+ min_bits = 1024
+ max_bits = 8192
+ preferred_bits = 2048
+
+ def __init__(self, transport):
+ self.transport = transport
+
+ def start_kex(self):
+ if self.transport.server_mode:
+ self.transport._expect_packet(_MSG_KEXDH_GEX_REQUEST)
+ return
+ # request a bit range: we accept (min_bits) to (max_bits), but prefer
+ # (preferred_bits). according to the spec, we shouldn't pull the
+ # minimum up above 1024.
+ m = Message()
+ m.add_byte(chr(_MSG_KEXDH_GEX_REQUEST))
+ m.add_int(self.min_bits)
+ m.add_int(self.preferred_bits)
+ m.add_int(self.max_bits)
+ self.transport._send_message(m)
+ self.transport._expect_packet(_MSG_KEXDH_GEX_GROUP)
+
+ def parse_next(self, ptype, m):
+ if ptype == _MSG_KEXDH_GEX_REQUEST:
+ return self._parse_kexdh_gex_request(m)
+ elif ptype == _MSG_KEXDH_GEX_GROUP:
+ return self._parse_kexdh_gex_group(m)
+ elif ptype == _MSG_KEXDH_GEX_INIT:
+ return self._parse_kexdh_gex_init(m)
+ elif ptype == _MSG_KEXDH_GEX_REPLY:
+ return self._parse_kexdh_gex_reply(m)
+ raise SSHException('KexGex asked to handle packet type %d' % ptype)
+
+
+ ### internals...
+
+
+ def _generate_x(self):
+ # generate an "x" (1 < x < (p-1)/2).
+ q = (self.p - 1) // 2
+ qnorm = util.deflate_long(q, 0)
+ qhbyte = ord(qnorm[0])
+ bytes = len(qnorm)
+ qmask = 0xff
+ while not (qhbyte & 0x80):
+ qhbyte <<= 1
+ qmask >>= 1
+ while True:
+ self.transport.randpool.stir()
+ x_bytes = self.transport.randpool.get_bytes(bytes)
+ x_bytes = chr(ord(x_bytes[0]) & qmask) + x_bytes[1:]
+ x = util.inflate_long(x_bytes, 1)
+ if (x > 1) and (x < q):
+ break
+ self.x = x
+
+ def _parse_kexdh_gex_request(self, m):
+ minbits = m.get_int()
+ preferredbits = m.get_int()
+ maxbits = m.get_int()
+ # smoosh the user's preferred size into our own limits
+ if preferredbits > self.max_bits:
+ preferredbits = self.max_bits
+ if preferredbits < self.min_bits:
+ preferredbits = self.min_bits
+ # fix min/max if they're inconsistent. technically, we could just pout
+ # and hang up, but there's no harm in giving them the benefit of the
+ # doubt and just picking a bitsize for them.
+ if minbits > preferredbits:
+ minbits = preferredbits
+ if maxbits < preferredbits:
+ maxbits = preferredbits
+ # now save a copy
+ self.min_bits = minbits
+ self.preferred_bits = preferredbits
+ self.max_bits = maxbits
+ # generate prime
+ pack = self.transport._get_modulus_pack()
+ if pack is None:
+ raise SSHException('Can\'t do server-side gex with no modulus pack')
+ self.transport._log(DEBUG, 'Picking p (%d <= %d <= %d bits)' % (minbits, preferredbits, maxbits))
+ self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits)
+ m = Message()
+ m.add_byte(chr(_MSG_KEXDH_GEX_GROUP))
+ m.add_mpint(self.p)
+ m.add_mpint(self.g)
+ self.transport._send_message(m)
+ self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)
+
+ def _parse_kexdh_gex_group(self, m):
+ self.p = m.get_mpint()
+ self.g = m.get_mpint()
+ # reject if p's bit length < 1024 or > 8192
+ bitlen = util.bit_length(self.p)
+ if (bitlen < 1024) or (bitlen > 8192):
+ raise SSHException('Server-generated gex p (don\'t ask) is out of range (%d bits)' % bitlen)
+ self.transport._log(DEBUG, 'Got server p (%d bits)' % bitlen)
+ self._generate_x()
+ # now compute e = g^x mod p
+ self.e = pow(self.g, self.x, self.p)
+ m = Message()
+ m.add_byte(chr(_MSG_KEXDH_GEX_INIT))
+ m.add_mpint(self.e)
+ self.transport._send_message(m)
+ self.transport._expect_packet(_MSG_KEXDH_GEX_REPLY)
+
+ def _parse_kexdh_gex_init(self, m):
+ self.e = m.get_mpint()
+ if (self.e < 1) or (self.e > self.p - 1):
+ raise SSHException('Client kex "e" is out of range')
+ self._generate_x()
+ self.f = pow(self.g, self.x, self.p)
+ K = pow(self.e, self.x, self.p)
+ key = str(self.transport.get_server_key())
+ # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K)
+ hm = Message()
+ hm.add(self.transport.remote_version, self.transport.local_version,
+ self.transport.remote_kex_init, self.transport.local_kex_init,
+ key)
+ hm.add_int(self.min_bits)
+ hm.add_int(self.preferred_bits)
+ hm.add_int(self.max_bits)
+ hm.add_mpint(self.p)
+ hm.add_mpint(self.g)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ H = SHA.new(str(hm)).digest()
+ self.transport._set_K_H(K, H)
+ # sign it
+ sig = self.transport.get_server_key().sign_ssh_data(self.transport.randpool, H)
+ # send reply
+ m = Message()
+ m.add_byte(chr(_MSG_KEXDH_GEX_REPLY))
+ m.add_string(key)
+ m.add_mpint(self.f)
+ m.add_string(str(sig))
+ self.transport._send_message(m)
+ self.transport._activate_outbound()
+
+ def _parse_kexdh_gex_reply(self, m):
+ host_key = m.get_string()
+ self.f = m.get_mpint()
+ sig = m.get_string()
+ if (self.f < 1) or (self.f > self.p - 1):
+ raise SSHException('Server kex "f" is out of range')
+ K = pow(self.f, self.x, self.p)
+ # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K)
+ hm = Message()
+ hm.add(self.transport.local_version, self.transport.remote_version,
+ self.transport.local_kex_init, self.transport.remote_kex_init,
+ host_key)
+ hm.add_int(self.min_bits)
+ hm.add_int(self.preferred_bits)
+ hm.add_int(self.max_bits)
+ hm.add_mpint(self.p)
+ hm.add_mpint(self.g)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ self.transport._set_K_H(K, SHA.new(str(hm)).digest())
+ self.transport._verify_key(host_key, sig)
+ self.transport._activate_outbound()
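
Group exchange is negotiated like any other kex algorithm. A client that wants to prefer it can reorder the transport's kex list before starting negotiation; this sketch assumes the SecurityOptions interface from transport.py and uses a placeholder host:

import socket

import paramiko

sock = socket.socket()
sock.connect(('ssh.example.com', 22))          # placeholder host
t = paramiko.Transport(sock)
opts = t.get_security_options()
opts.kex = ('diffie-hellman-group-exchange-sha1',
            'diffie-hellman-group1-sha1')      # try gex first, fall back to group1
t.start_client()                               # key exchange happens here
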
diff --git a/paramiko/kex_group1.py b/paramiko/kex_group1.py
new file mode 100644
index 0000000..a13cf3a
--- /dev/null
+++ b/paramiko/kex_group1.py
@@ -0,0 +1,136 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
+1024 bit key halves, using a known "p" prime and "g" generator.
+"""
+
+from Crypto.Hash import SHA
+
+from paramiko.common import *
+from paramiko import util
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+
+
+_MSG_KEXDH_INIT, _MSG_KEXDH_REPLY = range(30, 32)
+
+# draft-ietf-secsh-transport-09.txt, page 17
+P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFFL
+G = 2
+
+
+class KexGroup1(object):
+
+ name = 'diffie-hellman-group1-sha1'
+
+ def __init__(self, transport):
+ self.transport = transport
+ self.x = 0L
+ self.e = 0L
+ self.f = 0L
+
+ def start_kex(self):
+ self._generate_x()
+ if self.transport.server_mode:
+ # compute f = g^x mod p, but don't send it yet
+ self.f = pow(G, self.x, P)
+ self.transport._expect_packet(_MSG_KEXDH_INIT)
+ return
+ # compute e = g^x mod p (where g=2), and send it
+ self.e = pow(G, self.x, P)
+ m = Message()
+ m.add_byte(chr(_MSG_KEXDH_INIT))
+ m.add_mpint(self.e)
+ self.transport._send_message(m)
+ self.transport._expect_packet(_MSG_KEXDH_REPLY)
+
+ def parse_next(self, ptype, m):
+ if self.transport.server_mode and (ptype == _MSG_KEXDH_INIT):
+ return self._parse_kexdh_init(m)
+ elif not self.transport.server_mode and (ptype == _MSG_KEXDH_REPLY):
+ return self._parse_kexdh_reply(m)
+ raise SSHException('KexGroup1 asked to handle packet type %d' % ptype)
+
+
+ ### internals...
+
+
+ def _generate_x(self):
+ # generate an "x" (1 < x < q), where q is (p-1)/2.
+ # p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
+ # therefore q can be approximated as a 2^1023. we drop the subset of
+ # potential x where the first 63 bits are 1, because some of those will be
+ # larger than q (but this is a tiny tiny subset of potential x).
+ while 1:
+ self.transport.randpool.stir()
+ x_bytes = self.transport.randpool.get_bytes(128)
+ x_bytes = chr(ord(x_bytes[0]) & 0x7f) + x_bytes[1:]
+ if (x_bytes[:8] != '\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF') and \
+ (x_bytes[:8] != '\x00\x00\x00\x00\x00\x00\x00\x00'):
+ break
+ self.x = util.inflate_long(x_bytes)
+
+ def _parse_kexdh_reply(self, m):
+ # client mode
+ host_key = m.get_string()
+ self.f = m.get_mpint()
+ if (self.f < 1) or (self.f > P - 1):
+ raise SSHException('Server kex "f" is out of range')
+ sig = m.get_string()
+ K = pow(self.f, self.x, P)
+ # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K)
+ hm = Message()
+ hm.add(self.transport.local_version, self.transport.remote_version,
+ self.transport.local_kex_init, self.transport.remote_kex_init)
+ hm.add_string(host_key)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ self.transport._set_K_H(K, SHA.new(str(hm)).digest())
+ self.transport._verify_key(host_key, sig)
+ self.transport._activate_outbound()
+
+ def _parse_kexdh_init(self, m):
+ # server mode
+ self.e = m.get_mpint()
+ if (self.e < 1) or (self.e > P - 1):
+ raise SSHException('Client kex "e" is out of range')
+ K = pow(self.e, self.x, P)
+ key = str(self.transport.get_server_key())
+ # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K)
+ hm = Message()
+ hm.add(self.transport.remote_version, self.transport.local_version,
+ self.transport.remote_kex_init, self.transport.local_kex_init)
+ hm.add_string(key)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ H = SHA.new(str(hm)).digest()
+ self.transport._set_K_H(K, H)
+ # sign it
+ sig = self.transport.get_server_key().sign_ssh_data(self.transport.randpool, H)
+ # send reply
+ m = Message()
+ m.add_byte(chr(_MSG_KEXDH_REPLY))
+ m.add_string(key)
+ m.add_mpint(self.f)
+ m.add_string(str(sig))
+ self.transport._send_message(m)
+ self.transport._activate_outbound()
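
The exchange itself is plain Diffie-Hellman over the fixed group above, so both sides derive the same shared secret K. A toy walk-through with hard-coded private exponents (real ones come from the transport's random pool):

from paramiko.kex_group1 import P, G

x_client = 0x1234567890abcdefL                        # stand-in private exponents
x_server = 0xfedcba9876543210L
e = pow(G, x_client, P)                               # client sends e in KEXDH_INIT
f = pow(G, x_server, P)                               # server sends f in KEXDH_REPLY
assert pow(f, x_client, P) == pow(e, x_server, P)     # both sides compute the same K
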
diff --git a/paramiko/logging22.py b/paramiko/logging22.py
new file mode 100644
index 0000000..ac11a73
--- /dev/null
+++ b/paramiko/logging22.py
@@ -0,0 +1,66 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Stub out logging on python < 2.3.
+"""
+
+
+DEBUG = 10
+INFO = 20
+WARNING = 30
+ERROR = 40
+CRITICAL = 50
+
+
+def getLogger(name):
+ return _logger
+
+
+class logger (object):
+ def __init__(self):
+ self.handlers = [ ]
+ self.level = ERROR
+
+ def setLevel(self, level):
+ self.level = level
+
+ def addHandler(self, h):
+ self.handlers.append(h)
+
+ def addFilter(self, filter):
+ pass
+
+ def log(self, level, text):
+ if level >= self.level:
+ for h in self.handlers:
+ h.f.write(text + '\n')
+ h.f.flush()
+
+class StreamHandler (object):
+ def __init__(self, f):
+ self.f = f
+
+ def setFormatter(self, f):
+ pass
+
+class Formatter (object):
+ def __init__(self, x, y):
+ pass
+
+_logger = logger()
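
The stub mirrors just enough of the standard logging API for paramiko's needs. Used on its own, it looks like this (illustrative):

import sys

from paramiko import logging22 as logging

log = logging.getLogger('paramiko.transport')     # always the single shared stub logger
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stderr))
log.log(logging.INFO, 'negotiation started')      # written to stderr and flushed
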
diff --git a/paramiko/message.py b/paramiko/message.py
new file mode 100644
index 0000000..1d75a01
--- /dev/null
+++ b/paramiko/message.py
@@ -0,0 +1,301 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Implementation of an SSH2 "message".
+"""
+
+import struct
+import cStringIO
+
+from paramiko import util
+
+
+class Message (object):
+ """
+ An SSH2 I{Message} is a stream of bytes that encodes some combination of
+ strings, integers, bools, and infinite-precision integers (known in python
+ as I{long}s). This class builds or breaks down such a byte stream.
+
+ Normally you don't need to deal with anything this low-level, but it's
+ exposed for people implementing custom extensions, or features that
+ paramiko doesn't support yet.
+ """
+
+ def __init__(self, content=None):
+ """
+ Create a new SSH2 Message.
+
+ @param content: the byte stream to use as the Message content (passed
+ in only when decomposing a Message).
+ @type content: string
+ """
+ if content != None:
+ self.packet = cStringIO.StringIO(content)
+ else:
+ self.packet = cStringIO.StringIO()
+
+ def __str__(self):
+ """
+ Return the byte stream content of this Message, as a string.
+
+ @return: the contents of this Message.
+ @rtype: string
+ """
+ return self.packet.getvalue()
+
+ def __repr__(self):
+ """
+ Returns a string representation of this object, for debugging.
+
+ @rtype: string
+ """
+ return 'paramiko.Message(' + repr(self.packet.getvalue()) + ')'
+
+ def rewind(self):
+ """
+ Rewind the message to the beginning as if no items had been parsed
+ out of it yet.
+ """
+ self.packet.seek(0)
+
+ def get_remainder(self):
+ """
+ Return the bytes of this Message that haven't already been parsed and
+ returned.
+
+ @return: a string of the bytes not parsed yet.
+ @rtype: string
+ """
+ position = self.packet.tell()
+ remainder = self.packet.read()
+ self.packet.seek(position)
+ return remainder
+
+ def get_so_far(self):
+ """
+ Returns the bytes of this Message that have been parsed and returned.
+ The string passed into a Message's constructor can be regenerated by
+ concatenating C{get_so_far} and L{get_remainder}.
+
+ @return: a string of the bytes parsed so far.
+ @rtype: string
+ """
+ position = self.packet.tell()
+ self.rewind()
+ return self.packet.read(position)
+
+ def get_bytes(self, n):
+ """
+ Return the next C{n} bytes of the Message, without decomposing into
+ an int, string, etc. Just the raw bytes are returned.
+
+ @return: a string of the next C{n} bytes of the Message, or a string
+ of C{n} zero bytes, if there aren't C{n} bytes remaining.
+ @rtype: string
+ """
+ b = self.packet.read(n)
+ if len(b) < n:
+ return '\x00'*n
+ return b
+
+ def get_byte(self):
+ """
+ Return the next byte of the Message, without decomposing it. This
+ is equivalent to L{get_bytes(1)<get_bytes>}.
+
+ @return: the next byte of the Message, or C{'\000'} if there aren't
+ any bytes remaining.
+ @rtype: string
+ """
+ return self.get_bytes(1)
+
+ def get_boolean(self):
+ """
+ Fetch a boolean from the stream.
+
+ @return: C{True} or C{False} (from the Message).
+ @rtype: bool
+ """
+ b = self.get_bytes(1)
+ return b != '\x00'
+
+ def get_int(self):
+ """
+ Fetch an int from the stream.
+
+ @return: a 32-bit unsigned integer.
+ @rtype: int
+ """
+ return struct.unpack('>I', self.get_bytes(4))[0]
+
+ def get_int64(self):
+ """
+ Fetch a 64-bit int from the stream.
+
+ @return: a 64-bit unsigned integer.
+ @rtype: long
+ """
+ return struct.unpack('>Q', self.get_bytes(8))[0]
+
+ def get_mpint(self):
+ """
+ Fetch a long int (mpint) from the stream.
+
+ @return: an arbitrary-length integer.
+ @rtype: long
+ """
+ return util.inflate_long(self.get_string())
+
+ def get_string(self):
+ """
+ Fetch a string from the stream. This could be a byte string and may
+ contain unprintable characters. (It's not unheard of for a string to
+ contain another byte-stream Message.)
+
+ @return: a string.
+ @rtype: string
+ """
+ return self.get_bytes(self.get_int())
+
+ def get_list(self):
+ """
+ Fetch a list of strings from the stream. These are trivially encoded
+ as comma-separated values in a string.
+
+ @return: a list of strings.
+ @rtype: list of strings
+ """
+ return self.get_string().split(',')
+
+ def add_bytes(self, b):
+ """
+ Write bytes to the stream, without any formatting.
+
+ @param b: bytes to add
+ @type b: str
+ """
+ self.packet.write(b)
+ return self
+
+ def add_byte(self, b):
+ """
+ Write a single byte to the stream, without any formatting.
+
+ @param b: byte to add
+ @type b: str
+ """
+ self.packet.write(b)
+ return self
+
+ def add_boolean(self, b):
+ """
+ Add a boolean value to the stream.
+
+ @param b: boolean value to add
+ @type b: bool
+ """
+ if b:
+ self.add_byte('\x01')
+ else:
+ self.add_byte('\x00')
+ return self
+
+ def add_int(self, n):
+ """
+ Add an integer to the stream.
+
+ @param n: integer to add
+ @type n: int
+ """
+ self.packet.write(struct.pack('>I', n))
+ return self
+
+ def add_int64(self, n):
+ """
+ Add a 64-bit int to the stream.
+
+ @param n: long int to add
+ @type n: long
+ """
+ self.packet.write(struct.pack('>Q', n))
+ return self
+
+ def add_mpint(self, z):
+ """
+ Add a long int to the stream, encoded as an infinite-precision
+ integer. This method only works on positive numbers.
+
+ @param z: long int to add
+ @type z: long
+ """
+ self.add_string(util.deflate_long(z))
+ return self
+
+ def add_string(self, s):
+ """
+ Add a string to the stream.
+
+ @param s: string to add
+ @type s: str
+ """
+ self.add_int(len(s))
+ self.packet.write(s)
+ return self
+
+ def add_list(self, l):
+ """
+ Add a list of strings to the stream. They are encoded identically to
+ a single string of values separated by commas. (Yes, really, that's
+ how SSH2 does it.)
+
+ @param l: list of strings to add
+ @type l: list(str)
+ """
+ self.add_string(','.join(l))
+ return self
+
+ def _add(self, i):
+ if type(i) is str:
+ return self.add_string(i)
+ elif type(i) is int:
+ return self.add_int(i)
+ elif type(i) is long:
+ if i > 0xffffffffL:
+ return self.add_mpint(i)
+ else:
+ return self.add_int(i)
+ elif type(i) is bool:
+ return self.add_boolean(i)
+ elif type(i) is list:
+ return self.add_list(i)
+ else:
+            raise Exception('Unknown type')
+
+ def add(self, *seq):
+ """
+ Add a sequence of items to the stream. The values are encoded based
+ on their type: str, int, bool, list, or long.
+
+ @param seq: the sequence of items
+ @type seq: sequence
+
+ @bug: longs are encoded non-deterministically. Don't use this method.
+ """
+ for item in seq:
+ self._add(item)
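
A round-trip sketch for the Message class: fields are written in order and must be read back in the same order.

from paramiko.message import Message

m = Message()
m.add_byte(chr(90))                 # e.g. MSG_CHANNEL_OPEN
m.add_string('session')
m.add_int(0)
m.add_boolean(True)

p = Message(str(m))                 # re-parse from the raw byte stream
print ord(p.get_byte())             # 90
print p.get_string()                # 'session'
print p.get_int()                   # 0
print p.get_boolean()               # True
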
diff --git a/paramiko/packet.py b/paramiko/packet.py
new file mode 100644
index 0000000..277d68e
--- /dev/null
+++ b/paramiko/packet.py
@@ -0,0 +1,442 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Packetizer.
+"""
+
+import select
+import socket
+import struct
+import threading
+import time
+from Crypto.Hash import HMAC
+
+from paramiko.common import *
+from paramiko import util
+from paramiko.ssh_exception import SSHException
+from paramiko.message import Message
+
+
+class NeedRekeyException (Exception):
+ pass
+
+
+class Packetizer (object):
+ """
+ Implementation of the base SSH packet protocol.
+ """
+
+ # READ the secsh RFC's before raising these values. if anything,
+ # they should probably be lower.
+ REKEY_PACKETS = pow(2, 30)
+ REKEY_BYTES = pow(2, 30)
+
+ def __init__(self, socket):
+ self.__socket = socket
+ self.__logger = None
+ self.__closed = False
+ self.__dump_packets = False
+ self.__need_rekey = False
+ self.__init_count = 0
+
+ # used for noticing when to re-key:
+ self.__sent_bytes = 0
+ self.__sent_packets = 0
+ self.__received_bytes = 0
+ self.__received_packets = 0
+ self.__received_packets_overflow = 0
+
+ # current inbound/outbound ciphering:
+ self.__block_size_out = 8
+ self.__block_size_in = 8
+ self.__mac_size_out = 0
+ self.__mac_size_in = 0
+ self.__block_engine_out = None
+ self.__block_engine_in = None
+ self.__mac_engine_out = None
+ self.__mac_engine_in = None
+ self.__mac_key_out = ''
+ self.__mac_key_in = ''
+ self.__compress_engine_out = None
+ self.__compress_engine_in = None
+ self.__sequence_number_out = 0L
+ self.__sequence_number_in = 0L
+
+ # lock around outbound writes (packet computation)
+ self.__write_lock = threading.RLock()
+
+ # keepalives:
+ self.__keepalive_interval = 0
+ self.__keepalive_last = time.time()
+ self.__keepalive_callback = None
+
+ def __del__(self):
+ # this is not guaranteed to be called, but we should try.
+ try:
+ self.__socket.close()
+ except:
+ pass
+
+ def set_log(self, log):
+ """
+ Set the python log object to use for logging.
+ """
+ self.__logger = log
+
+ def set_outbound_cipher(self, block_engine, block_size, mac_engine, mac_size, mac_key):
+ """
+ Switch outbound data cipher.
+ """
+ self.__block_engine_out = block_engine
+ self.__block_size_out = block_size
+ self.__mac_engine_out = mac_engine
+ self.__mac_size_out = mac_size
+ self.__mac_key_out = mac_key
+ self.__sent_bytes = 0
+ self.__sent_packets = 0
+ # wait until the reset happens in both directions before clearing rekey flag
+ self.__init_count |= 1
+ if self.__init_count == 3:
+ self.__init_count = 0
+ self.__need_rekey = False
+
+ def set_inbound_cipher(self, block_engine, block_size, mac_engine, mac_size, mac_key):
+ """
+ Switch inbound data cipher.
+ """
+ self.__block_engine_in = block_engine
+ self.__block_size_in = block_size
+ self.__mac_engine_in = mac_engine
+ self.__mac_size_in = mac_size
+ self.__mac_key_in = mac_key
+ self.__received_bytes = 0
+ self.__received_packets = 0
+ self.__received_packets_overflow = 0
+ # wait until the reset happens in both directions before clearing rekey flag
+ self.__init_count |= 2
+ if self.__init_count == 3:
+ self.__init_count = 0
+ self.__need_rekey = False
+
+ def set_outbound_compressor(self, compressor):
+ self.__compress_engine_out = compressor
+
+ def set_inbound_compressor(self, compressor):
+ self.__compress_engine_in = compressor
+
+ def close(self):
+ self.__closed = True
+
+ def set_hexdump(self, hexdump):
+ self.__dump_packets = hexdump
+
+ def get_hexdump(self):
+ return self.__dump_packets
+
+ def get_mac_size_in(self):
+ return self.__mac_size_in
+
+ def get_mac_size_out(self):
+ return self.__mac_size_out
+
+ def need_rekey(self):
+ """
+ Returns C{True} if a new set of keys needs to be negotiated. This
+ will be triggered during a packet read or write, so it should be
+ checked after every read or write, or at least after every few.
+
+ @return: C{True} if a new set of keys needs to be negotiated
+ """
+ return self.__need_rekey
+
+ def set_keepalive(self, interval, callback):
+ """
+ Turn on/off the callback keepalive. If C{interval} seconds pass with
+ no data read from or written to the socket, the callback will be
+ executed and the timer will be reset.
+ """
+ self.__keepalive_interval = interval
+ self.__keepalive_callback = callback
+ self.__keepalive_last = time.time()
+
+ def read_all(self, n, check_rekey=False):
+ """
+ Read as close to N bytes as possible, blocking as long as necessary.
+
+ @param n: number of bytes to read
+ @type n: int
+ @return: the data read
+ @rtype: str
+ @raise EOFError: if the socket was closed before all the bytes could
+ be read
+ """
+ if PY22:
+ return self._py22_read_all(n)
+ out = ''
+ while n > 0:
+ try:
+ x = self.__socket.recv(n)
+ if len(x) == 0:
+ raise EOFError()
+ out += x
+ n -= len(x)
+ except socket.timeout:
+ if self.__closed:
+ raise EOFError()
+ if check_rekey and (len(out) == 0) and self.__need_rekey:
+ raise NeedRekeyException()
+ self._check_keepalive()
+ return out
+
+ def write_all(self, out):
+ self.__keepalive_last = time.time()
+ while len(out) > 0:
+ try:
+ n = self.__socket.send(out)
+ except socket.timeout:
+ n = 0
+ if self.__closed:
+ n = -1
+ except Exception:
+ # could be: (32, 'Broken pipe')
+ n = -1
+ if n < 0:
+ raise EOFError()
+ if n == len(out):
+ return
+ out = out[n:]
+ return
+
+ def readline(self, timeout):
+ """
+ Read a line from the socket. This is done in a fairly inefficient
+ way, but is only used for initial banner negotiation so it's not worth
+ optimising.
+ """
+ buf = ''
+        while '\n' not in buf:
+ buf += self._read_timeout(timeout)
+ buf = buf[:-1]
+ if (len(buf) > 0) and (buf[-1] == '\r'):
+ buf = buf[:-1]
+ return buf
+
+ def send_message(self, data):
+ """
+ Write a block of data using the current cipher, as an SSH block.
+ """
+ # encrypt this sucka
+ randpool.stir()
+ data = str(data)
+ cmd = ord(data[0])
+ if cmd in MSG_NAMES:
+ cmd_name = MSG_NAMES[cmd]
+ else:
+ cmd_name = '$%x' % cmd
+ self._log(DEBUG, 'Write packet <%s>, length %d' % (cmd_name, len(data)))
+ if self.__compress_engine_out is not None:
+ data = self.__compress_engine_out(data)
+ packet = self._build_packet(data)
+ if self.__dump_packets:
+ self._log(DEBUG, util.format_binary(packet, 'OUT: '))
+ self.__write_lock.acquire()
+ try:
+            if self.__block_engine_out is not None:
+                out = self.__block_engine_out.encrypt(packet)
+            else:
+                out = packet
+            # + mac
+            if self.__block_engine_out is not None:
+ payload = struct.pack('>I', self.__sequence_number_out) + packet
+ out += HMAC.HMAC(self.__mac_key_out, payload, self.__mac_engine_out).digest()[:self.__mac_size_out]
+ self.__sequence_number_out = (self.__sequence_number_out + 1) & 0xffffffffL
+ self.write_all(out)
+
+ self.__sent_bytes += len(out)
+ self.__sent_packets += 1
+ if ((self.__sent_packets >= self.REKEY_PACKETS) or (self.__sent_bytes >= self.REKEY_BYTES)) \
+ and not self.__need_rekey:
+ # only ask once for rekeying
+ self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes sent)' %
+ (self.__sent_packets, self.__sent_bytes))
+ self.__received_packets_overflow = 0
+ self._trigger_rekey()
+ finally:
+ self.__write_lock.release()
+
+ def read_message(self):
+ """
+ Only one thread should ever be in this function (no other locking is
+ done).
+
+ @raise SSHException: if the packet is mangled
+ @raise NeedRekeyException: if the transport should rekey
+ """
+ header = self.read_all(self.__block_size_in, check_rekey=True)
+        if self.__block_engine_in is not None:
+            header = self.__block_engine_in.decrypt(header)
+        if self.__dump_packets:
+            self._log(DEBUG, util.format_binary(header, 'IN: '))
+ packet_size = struct.unpack('>I', header[:4])[0]
+ # leftover contains decrypted bytes from the first block (after the length field)
+ leftover = header[4:]
+ if (packet_size - len(leftover)) % self.__block_size_in != 0:
+ raise SSHException('Invalid packet blocking')
+ buf = self.read_all(packet_size + self.__mac_size_in - len(leftover))
+ packet = buf[:packet_size - len(leftover)]
+ post_packet = buf[packet_size - len(leftover):]
+        if self.__block_engine_in is not None:
+            packet = self.__block_engine_in.decrypt(packet)
+        if self.__dump_packets:
+            self._log(DEBUG, util.format_binary(packet, 'IN: '))
+ packet = leftover + packet
+
+ if self.__mac_size_in > 0:
+ mac = post_packet[:self.__mac_size_in]
+ mac_payload = struct.pack('>II', self.__sequence_number_in, packet_size) + packet
+ my_mac = HMAC.HMAC(self.__mac_key_in, mac_payload, self.__mac_engine_in).digest()[:self.__mac_size_in]
+ if my_mac != mac:
+ raise SSHException('Mismatched MAC')
+ padding = ord(packet[0])
+ payload = packet[1:packet_size - padding]
+ randpool.add_event(packet[packet_size - padding])
+ if self.__dump_packets:
+ self._log(DEBUG, 'Got payload (%d bytes, %d padding)' % (packet_size, padding))
+
+ if self.__compress_engine_in is not None:
+ payload = self.__compress_engine_in(payload)
+
+ msg = Message(payload[1:])
+ msg.seqno = self.__sequence_number_in
+ self.__sequence_number_in = (self.__sequence_number_in + 1) & 0xffffffffL
+
+ # check for rekey
+ self.__received_bytes += packet_size + self.__mac_size_in + 4
+ self.__received_packets += 1
+ if self.__need_rekey:
+ # we've asked to rekey -- give them 20 packets to comply before
+ # dropping the connection
+ self.__received_packets_overflow += 1
+ if self.__received_packets_overflow >= 20:
+ raise SSHException('Remote transport is ignoring rekey requests')
+ elif (self.__received_packets >= self.REKEY_PACKETS) or \
+ (self.__received_bytes >= self.REKEY_BYTES):
+ # only ask once for rekeying
+ self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes received)' %
+ (self.__received_packets, self.__received_bytes))
+ self.__received_packets_overflow = 0
+ self._trigger_rekey()
+
+ cmd = ord(payload[0])
+ if cmd in MSG_NAMES:
+ cmd_name = MSG_NAMES[cmd]
+ else:
+ cmd_name = '$%x' % cmd
+ self._log(DEBUG, 'Read packet <%s>, length %d' % (cmd_name, len(payload)))
+ return cmd, msg
+
+
+ ########## protected
+
+
+ def _log(self, level, msg):
+ if self.__logger is None:
+ return
+ if issubclass(type(msg), list):
+ for m in msg:
+ self.__logger.log(level, m)
+ else:
+ self.__logger.log(level, msg)
+
+ def _check_keepalive(self):
+ if (not self.__keepalive_interval) or (not self.__block_engine_out) or \
+ self.__need_rekey:
+ # wait till we're encrypting, and not in the middle of rekeying
+ return
+ now = time.time()
+ if now > self.__keepalive_last + self.__keepalive_interval:
+ self.__keepalive_callback()
+ self.__keepalive_last = now
+
+ def _py22_read_all(self, n):
+ out = ''
+ while n > 0:
+ r, w, e = select.select([self.__socket], [], [], 0.1)
+ if self.__socket not in r:
+ if self.__closed:
+ raise EOFError()
+ self._check_keepalive()
+ else:
+ x = self.__socket.recv(n)
+ if len(x) == 0:
+ raise EOFError()
+ out += x
+ n -= len(x)
+ return out
+
+ def _py22_read_timeout(self, timeout):
+ start = time.time()
+ while True:
+ r, w, e = select.select([self.__socket], [], [], 0.1)
+ if self.__socket in r:
+ x = self.__socket.recv(1)
+ if len(x) == 0:
+ raise EOFError()
+ return x
+ if self.__closed:
+ raise EOFError()
+ now = time.time()
+ if now - start >= timeout:
+ raise socket.timeout()
+
+ def _read_timeout(self, timeout):
+ if PY22:
+            return self._py22_read_timeout(timeout)
+ start = time.time()
+ while True:
+ try:
+ x = self.__socket.recv(1)
+ if len(x) == 0:
+ raise EOFError()
+ return x
+ except socket.timeout:
+ pass
+ if self.__closed:
+ raise EOFError()
+ now = time.time()
+ if now - start >= timeout:
+ raise socket.timeout()
+
+ def _build_packet(self, payload):
+ # pad up at least 4 bytes, to nearest block-size (usually 8)
+ bsize = self.__block_size_out
+ padding = 3 + bsize - ((len(payload) + 8) % bsize)
+ packet = struct.pack('>IB', len(payload) + padding + 1, padding)
+ packet += payload
+ if self.__block_engine_out is not None:
+ packet += randpool.get_bytes(padding)
+ else:
+ # cute trick i caught openssh doing: if we're not encrypting,
+ # don't waste random bytes for the padding
+ packet += (chr(0) * padding)
+ return packet
+
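+    # For reference, the block built above follows the SSH binary packet
+    # layout (the MAC is appended later by send_message):
+    #
+    #     uint32    packet_length   (= 1 + len(payload) + padding)
+    #     byte      padding_length
+    #     byte[]    payload         (possibly compressed)
+    #     byte[]    random padding  (at least 4 bytes)
+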
+ def _trigger_rekey(self):
+ # outside code should check for this flag
+ self.__need_rekey = True
diff --git a/paramiko/pipe.py b/paramiko/pipe.py
new file mode 100644
index 0000000..cc28f43
--- /dev/null
+++ b/paramiko/pipe.py
@@ -0,0 +1,105 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Abstraction of a one-way pipe where the read end can be used in select().
+Normally this is trivial, but Windows makes it nearly impossible.
+"""
+
+import sys
+import os
+import socket
+
+
+def make_pipe ():
+ if sys.platform[:3] != 'win':
+ return PosixPipe()
+ return WindowsPipe()
+
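+# Illustrative sketch: the read end can be watched with select() alongside
+# ordinary sockets, while another thread calls set() to wake the waiter
+# (sock and timeout are assumed to exist in the surrounding code):
+#
+#     import select
+#     p = make_pipe()
+#     r, w, e = select.select([sock, p], [], [], timeout)
+#     if p in r:
+#         p.clear()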
+
+class PosixPipe (object):
+ def __init__ (self):
+ self._rfd, self._wfd = os.pipe()
+ self._set = False
+ self._forever = False
+
+ def close (self):
+ os.close(self._rfd)
+ os.close(self._wfd)
+
+ def fileno (self):
+ return self._rfd
+
+ def clear (self):
+ if not self._set or self._forever:
+ return
+ os.read(self._rfd, 1)
+ self._set = False
+
+ def set (self):
+ if self._set:
+ return
+ self._set = True
+ os.write(self._wfd, '*')
+
+ def set_forever (self):
+ self._forever = True
+ self.set()
+
+
+class WindowsPipe (object):
+ """
+ On Windows, only an OS-level "WinSock" may be used in select(), but reads
+ and writes must be to the actual socket object.
+ """
+ def __init__ (self):
+ serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ serv.bind(('127.0.0.1', 0))
+ serv.listen(1)
+
+ # need to save sockets in _rsock/_wsock so they don't get closed
+ self._rsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self._rsock.connect(('127.0.0.1', serv.getsockname()[1]))
+
+ self._wsock, addr = serv.accept()
+ serv.close()
+ self._set = False
+ self._forever = False
+
+ def close (self):
+ self._rsock.close()
+ self._wsock.close()
+
+ def fileno (self):
+ return self._rsock.fileno()
+
+ def clear (self):
+ if not self._set or self._forever:
+ return
+ self._rsock.recv(1)
+ self._set = False
+
+ def set (self):
+ if self._set:
+ return
+ self._set = True
+ self._wsock.send('*')
+
+ def set_forever (self):
+ self._forever = True
+ self.set()
diff --git a/paramiko/pkey.py b/paramiko/pkey.py
new file mode 100644
index 0000000..75db8e5
--- /dev/null
+++ b/paramiko/pkey.py
@@ -0,0 +1,339 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Common API for all public keys.
+"""
+
+import base64
+import binascii
+import os
+
+from Crypto.Hash import MD5
+from Crypto.Cipher import DES3
+
+from paramiko.common import *
+from paramiko import util
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException, PasswordRequiredException
+
+
+class PKey (object):
+ """
+ Base class for public keys.
+ """
+
+ # known encryption types for private key files:
+ _CIPHER_TABLE = {
+ 'DES-EDE3-CBC': { 'cipher': DES3, 'keysize': 24, 'blocksize': 8, 'mode': DES3.MODE_CBC }
+ }
+
+
+ def __init__(self, msg=None, data=None):
+ """
+ Create a new instance of this public key type. If C{msg} is given,
+ the key's public part(s) will be filled in from the message. If
+ C{data} is given, the key's public part(s) will be filled in from
+ the string.
+
+ @param msg: an optional SSH L{Message} containing a public key of this
+ type.
+ @type msg: L{Message}
+ @param data: an optional string containing a public key of this type
+ @type data: str
+
+ @raise SSHException: if a key cannot be created from the C{data} or
+ C{msg} given, or no key was passed in.
+ """
+ pass
+
+ def __str__(self):
+ """
+ Return a string of an SSH L{Message} made up of the public part(s) of
+ this key. This string is suitable for passing to L{__init__} to
+ re-create the key object later.
+
+ @return: string representation of an SSH key message.
+ @rtype: str
+ """
+ return ''
+
+ def __cmp__(self, other):
+ """
+ Compare this key to another. Returns 0 if this key is equivalent to
+ the given key, or non-0 if they are different. Only the public parts
+ of the key are compared, so a public key will compare equal to its
+ corresponding private key.
+
+ @param other: key to compare to.
+ @type other: L{PKey}
+ @return: 0 if the two keys are equivalent, non-0 otherwise.
+ @rtype: int
+ """
+ hs = hash(self)
+ ho = hash(other)
+ if hs != ho:
+ return cmp(hs, ho)
+ return cmp(str(self), str(other))
+
+ def get_name(self):
+ """
+ Return the name of this private key implementation.
+
+ @return: name of this private key type, in SSH terminology (for
+ example, C{"ssh-rsa"}).
+ @rtype: str
+ """
+ return ''
+
+ def get_bits(self):
+ """
+ Return the number of significant bits in this key. This is useful
+ for judging the relative security of a key.
+
+ @return: bits in the key.
+ @rtype: int
+ """
+ return 0
+
+ def can_sign(self):
+ """
+ Return C{True} if this key has the private part necessary for signing
+ data.
+
+ @return: C{True} if this is a private key.
+ @rtype: bool
+ """
+ return False
+
+ def get_fingerprint(self):
+ """
+ Return an MD5 fingerprint of the public part of this key. Nothing
+ secret is revealed.
+
+ @return: a 16-byte string (binary) of the MD5 fingerprint, in SSH
+ format.
+ @rtype: str
+ """
+ return MD5.new(str(self)).digest()
+
+ def get_base64(self):
+ """
+ Return a base64 string containing the public part of this key. Nothing
+ secret is revealed. This format is compatible with that used to store
+ public key files or recognized host keys.
+
+ @return: a base64 string containing the public part of the key.
+ @rtype: str
+
+ @since: fearow
+ """
+ return base64.encodestring(str(self)).replace('\n', '')
+
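+    # Illustrative sketch ("key" stands for any loaded key instance): the hex
+    # fingerprint and one-line public key form commonly shown by ssh tools can
+    # be built from the two methods above with the hexify helper in
+    # paramiko.util:
+    #
+    #     fp = util.hexify(key.get_fingerprint())
+    #     pub_line = '%s %s' % (key.get_name(), key.get_base64())
+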
+ def sign_ssh_data(self, randpool, data):
+ """
+ Sign a blob of data with this private key, and return a L{Message}
+ representing an SSH signature message.
+
+ @param randpool: a secure random number generator.
+ @type randpool: L{Crypto.Util.randpool.RandomPool}
+ @param data: the data to sign.
+ @type data: str
+ @return: an SSH signature message.
+ @rtype: L{Message}
+ """
+ return ''
+
+ def verify_ssh_sig(self, data, msg):
+ """
+ Given a blob of data, and an SSH message representing a signature of
+ that data, verify that it was signed with this key.
+
+ @param data: the data that was signed.
+ @type data: str
+ @param msg: an SSH signature message
+ @type msg: L{Message}
+ @return: C{True} if the signature verifies correctly; C{False}
+ otherwise.
+ @rtype: boolean
+ """
+ return False
+
+ def from_private_key_file(cl, filename, password=None):
+ """
+ Create a key object by reading a private key file. If the private
+ key is encrypted and C{password} is not C{None}, the given password
+ will be used to decrypt the key (otherwise L{PasswordRequiredException}
+ is thrown). Through the magic of python, this factory method will
+ exist in all subclasses of PKey (such as L{RSAKey} or L{DSSKey}), but
+ is useless on the abstract PKey class.
+
+ @param filename: name of the file to read.
+ @type filename: str
+ @param password: an optional password to use to decrypt the key file,
+ if it's encrypted
+ @type password: str
+ @return: a new key object based on the given private key.
+ @rtype: L{PKey}
+
+ @raise IOError: if there was an error reading the file.
+ @raise PasswordRequiredException: if the private key file is
+ encrypted, and C{password} is C{None}.
+ @raise SSHException: if the key file is invalid.
+
+ @since: fearow
+ """
+ key = cl(filename=filename, password=password)
+ return key
+ from_private_key_file = classmethod(from_private_key_file)
+
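+    # Illustrative usage (the file path and password are assumed examples):
+    #
+    #     from paramiko import RSAKey
+    #     key = RSAKey.from_private_key_file('/home/user/.ssh/id_rsa',
+    #                                        password='secret')
+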
+ def write_private_key_file(self, filename, password=None):
+ """
+ Write private key contents into a file. If the password is not
+ C{None}, the key is encrypted before writing.
+
+ @param filename: name of the file to write.
+ @type filename: str
+ @param password: an optional password to use to encrypt the key file.
+ @type password: str
+
+ @raise IOError: if there was an error writing the file.
+ @raise SSHException: if the key is invalid.
+
+ @since: fearow
+ """
+        raise Exception('Not implemented in PKey')
+
+ def _read_private_key_file(self, tag, filename, password=None):
+ """
+ Read an SSH2-format private key file, looking for a string of the type
+ C{"BEGIN xxx PRIVATE KEY"} for some C{xxx}, base64-decode the text we
+ find, and return it as a string. If the private key is encrypted and
+ C{password} is not C{None}, the given password will be used to decrypt
+ the key (otherwise L{PasswordRequiredException} is thrown).
+
+ @param tag: C{"RSA"} or C{"DSA"}, the tag used to mark the data block.
+ @type tag: str
+ @param filename: name of the file to read.
+ @type filename: str
+ @param password: an optional password to use to decrypt the key file,
+ if it's encrypted.
+ @type password: str
+ @return: data blob that makes up the private key.
+ @rtype: str
+
+ @raise IOError: if there was an error reading the file.
+ @raise PasswordRequiredException: if the private key file is
+ encrypted, and C{password} is C{None}.
+ @raise SSHException: if the key file is invalid.
+ """
+ f = open(filename, 'r')
+ lines = f.readlines()
+ f.close()
+ start = 0
+ while (start < len(lines)) and (lines[start].strip() != '-----BEGIN ' + tag + ' PRIVATE KEY-----'):
+ start += 1
+ if start >= len(lines):
+ raise SSHException('not a valid ' + tag + ' private key file')
+ # parse any headers first
+ headers = {}
+ start += 1
+ while start < len(lines):
+ l = lines[start].split(': ')
+ if len(l) == 1:
+ break
+ headers[l[0].lower()] = l[1].strip()
+ start += 1
+ # find end
+ end = start
+        while (end < len(lines)) and (lines[end].strip() != '-----END ' + tag + ' PRIVATE KEY-----'):
+ end += 1
+ # if we trudged to the end of the file, just try to cope.
+ try:
+ data = base64.decodestring(''.join(lines[start:end]))
+ except binascii.Error, e:
+ raise SSHException('base64 decoding error: ' + str(e))
+ if not headers.has_key('proc-type'):
+            # unencrypted: done
+ return data
+ # encrypted keyfile: will need a password
+ if headers['proc-type'] != '4,ENCRYPTED':
+ raise SSHException('Unknown private key structure "%s"' % headers['proc-type'])
+ try:
+ encryption_type, saltstr = headers['dek-info'].split(',')
+ except:
+ raise SSHException('Can\'t parse DEK-info in private key file')
+ if not self._CIPHER_TABLE.has_key(encryption_type):
+ raise SSHException('Unknown private key cipher "%s"' % encryption_type)
+ # if no password was passed in, raise an exception pointing out that we need one
+ if password is None:
+ raise PasswordRequiredException('Private key file is encrypted')
+ cipher = self._CIPHER_TABLE[encryption_type]['cipher']
+ keysize = self._CIPHER_TABLE[encryption_type]['keysize']
+ mode = self._CIPHER_TABLE[encryption_type]['mode']
+ salt = util.unhexify(saltstr)
+ key = util.generate_key_bytes(MD5, salt, password, keysize)
+ return cipher.new(key, mode, salt).decrypt(data)
+
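+    # For reference, an encrypted key file accepted by this parser looks like
+    # the following (the DEK-Info salt is an arbitrary example and the body
+    # is elided):
+    #
+    #     -----BEGIN RSA PRIVATE KEY-----
+    #     Proc-Type: 4,ENCRYPTED
+    #     DEK-Info: DES-EDE3-CBC,A1B2C3D4E5F60718
+    #
+    #     <base64-encoded key data>
+    #     -----END RSA PRIVATE KEY-----
+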
+ def _write_private_key_file(self, tag, filename, data, password=None):
+ """
+ Write an SSH2-format private key file in a form that can be read by
+ paramiko or openssh. If no password is given, the key is written in
+ a trivially-encoded format (base64) which is completely insecure. If
+ a password is given, DES-EDE3-CBC is used.
+
+ @param tag: C{"RSA"} or C{"DSA"}, the tag used to mark the data block.
+ @type tag: str
+ @param filename: name of the file to write.
+ @type filename: str
+ @param data: data blob that makes up the private key.
+ @type data: str
+ @param password: an optional password to use to encrypt the file.
+ @type password: str
+
+ @raise IOError: if there was an error writing the file.
+ """
+        # note: the third argument to open() is the buffer size, not a mode,
+        # so restrict the permissions explicitly with os.chmod instead
+        f = open(filename, 'w')
+        os.chmod(filename, 0600)
+ f.write('-----BEGIN %s PRIVATE KEY-----\n' % tag)
+ if password is not None:
+ # since we only support one cipher here, use it
+ cipher_name = self._CIPHER_TABLE.keys()[0]
+ cipher = self._CIPHER_TABLE[cipher_name]['cipher']
+ keysize = self._CIPHER_TABLE[cipher_name]['keysize']
+ blocksize = self._CIPHER_TABLE[cipher_name]['blocksize']
+ mode = self._CIPHER_TABLE[cipher_name]['mode']
+ salt = randpool.get_bytes(8)
+ key = util.generate_key_bytes(MD5, salt, password, keysize)
+ if len(data) % blocksize != 0:
+ n = blocksize - len(data) % blocksize
+ #data += randpool.get_bytes(n)
+ # that would make more sense ^, but it confuses openssh.
+ data += '\0' * n
+ data = cipher.new(key, mode, salt).encrypt(data)
+ f.write('Proc-Type: 4,ENCRYPTED\n')
+ f.write('DEK-Info: %s,%s\n' % (cipher_name, util.hexify(salt)))
+ f.write('\n')
+ s = base64.encodestring(data)
+ # re-wrap to 64-char lines
+ s = ''.join(s.split('\n'))
+ s = '\n'.join([s[i : i+64] for i in range(0, len(s), 64)])
+ f.write(s)
+ f.write('\n')
+ f.write('-----END %s PRIVATE KEY-----\n' % tag)
+ f.close()
diff --git a/paramiko/primes.py b/paramiko/primes.py
new file mode 100644
index 0000000..3677394
--- /dev/null
+++ b/paramiko/primes.py
@@ -0,0 +1,148 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Utility functions for dealing with primes.
+"""
+
+from Crypto.Util import number
+
+from paramiko import util
+from paramiko.ssh_exception import SSHException
+
+
+def _generate_prime(bits, randpool):
+ "primtive attempt at prime generation"
+ hbyte_mask = pow(2, bits % 8) - 1
+ while True:
+ # loop catches the case where we increment n into a higher bit-range
+ x = randpool.get_bytes((bits+7) // 8)
+ if hbyte_mask > 0:
+ x = chr(ord(x[0]) & hbyte_mask) + x[1:]
+ n = util.inflate_long(x, 1)
+ n |= 1
+ n |= (1 << (bits - 1))
+ while not number.isPrime(n):
+ n += 2
+ if util.bit_length(n) == bits:
+ return n
+
+def _roll_random(rpool, n):
+ "returns a random # from 0 to N-1"
+ bits = util.bit_length(n-1)
+ bytes = (bits + 7) // 8
+ hbyte_mask = pow(2, bits % 8) - 1
+
+ # so here's the plan:
+ # we fetch as many random bits as we'd need to fit N-1, and if the
+ # generated number is >= N, we try again. in the worst case (N-1 is a
+ # power of 2), we have slightly better than 50% odds of getting one that
+ # fits, so i can't guarantee that this loop will ever finish, but the odds
+ # of it looping forever should be infinitesimal.
+ while True:
+ x = rpool.get_bytes(bytes)
+ if hbyte_mask > 0:
+ x = chr(ord(x[0]) & hbyte_mask) + x[1:]
+ num = util.inflate_long(x, 1)
+ if num < n:
+ return num
+
+
+class ModulusPack (object):
+ """
+ convenience object for holding the contents of the /etc/ssh/moduli file,
+ on systems that have such a file.
+ """
+
+ def __init__(self, rpool):
+ # pack is a hash of: bits -> [ (generator, modulus) ... ]
+ self.pack = {}
+ self.discarded = []
+ self.randpool = rpool
+
+ def _parse_modulus(self, line):
+ timestamp, type, tests, tries, size, generator, modulus = line.split()
+ type = int(type)
+ tests = int(tests)
+ tries = int(tries)
+ size = int(size)
+ generator = int(generator)
+ modulus = long(modulus, 16)
+
+ # weed out primes that aren't at least:
+ # type 2 (meets basic structural requirements)
+ # test 4 (more than just a small-prime sieve)
+ # tries < 100 if test & 4 (at least 100 tries of miller-rabin)
+ if (type < 2) or (tests < 4) or ((tests & 4) and (tests < 8) and (tries < 100)):
+ self.discarded.append((modulus, 'does not meet basic requirements'))
+ return
+ if generator == 0:
+ generator = 2
+
+ # there's a bug in the ssh "moduli" file (yeah, i know: shock! dismay!
+ # call cnn!) where it understates the bit lengths of these primes by 1.
+ # this is okay.
+ bl = util.bit_length(modulus)
+ if (bl != size) and (bl != size + 1):
+ self.discarded.append((modulus, 'incorrectly reported bit length %d' % size))
+ return
+ if not self.pack.has_key(bl):
+ self.pack[bl] = []
+ self.pack[bl].append((generator, modulus))
+
+ def read_file(self, filename):
+ """
+ @raise IOError: passed from any file operations that fail.
+ """
+ self.pack = {}
+ f = open(filename, 'r')
+ for line in f:
+ line = line.strip()
+ if (len(line) == 0) or (line[0] == '#'):
+ continue
+ try:
+ self._parse_modulus(line)
+ except:
+ continue
+ f.close()
+
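+    # For reference, each non-comment line of the moduli file has the form
+    # parsed by _parse_modulus above, with the modulus given in hex:
+    #
+    #     timestamp type tests tries size generator modulus
+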
+ def get_modulus(self, min, prefer, max):
+ bitsizes = self.pack.keys()
+ bitsizes.sort()
+ if len(bitsizes) == 0:
+ raise SSHException('no moduli available')
+ good = -1
+ # find nearest bitsize >= preferred
+ for b in bitsizes:
+ if (b >= prefer) and (b < max) and ((b < good) or (good == -1)):
+ good = b
+ # if that failed, find greatest bitsize >= min
+ if good == -1:
+ for b in bitsizes:
+ if (b >= min) and (b < max) and (b > good):
+ good = b
+ if good == -1:
+ # their entire (min, max) range has no intersection with our range.
+ # if their range is below ours, pick the smallest. otherwise pick
+ # the largest. it'll be out of their range requirement either way,
+ # but we'll be sending them the closest one we have.
+ good = bitsizes[0]
+ if min > good:
+ good = bitsizes[-1]
+ # now pick a random modulus of this bitsize
+ n = _roll_random(self.randpool, len(self.pack[good]))
+ return self.pack[good][n]
diff --git a/paramiko/rsakey.py b/paramiko/rsakey.py
new file mode 100644
index 0000000..780ea1b
--- /dev/null
+++ b/paramiko/rsakey.py
@@ -0,0 +1,165 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+L{RSAKey}
+"""
+
+from Crypto.PublicKey import RSA
+from Crypto.Hash import SHA, MD5
+from Crypto.Cipher import DES3
+
+from paramiko.common import *
+from paramiko import util
+from paramiko.message import Message
+from paramiko.ber import BER, BERException
+from paramiko.pkey import PKey
+from paramiko.ssh_exception import SSHException
+
+
+class RSAKey (PKey):
+ """
+ Representation of an RSA key which can be used to sign and verify SSH2
+ data.
+ """
+
+ def __init__(self, msg=None, data=None, filename=None, password=None, vals=None):
+ if filename is not None:
+ self._from_private_key_file(filename, password)
+ return
+ if (msg is None) and (data is not None):
+ msg = Message(data)
+ if vals is not None:
+ self.e, self.n = vals
+ else:
+ if msg is None:
+ raise SSHException('Key object may not be empty')
+ if msg.get_string() != 'ssh-rsa':
+ raise SSHException('Invalid key')
+ self.e = msg.get_mpint()
+ self.n = msg.get_mpint()
+ self.size = util.bit_length(self.n)
+
+ def __str__(self):
+ m = Message()
+ m.add_string('ssh-rsa')
+ m.add_mpint(self.e)
+ m.add_mpint(self.n)
+ return str(m)
+
+ def __hash__(self):
+ h = hash(self.get_name())
+ h = h * 37 + hash(self.e)
+ h = h * 37 + hash(self.n)
+ return hash(h)
+
+ def get_name(self):
+ return 'ssh-rsa'
+
+ def get_bits(self):
+ return self.size
+
+ def can_sign(self):
+ return hasattr(self, 'd')
+
+ def sign_ssh_data(self, rpool, data):
+ digest = SHA.new(data).digest()
+ rsa = RSA.construct((long(self.n), long(self.e), long(self.d)))
+ sig = util.deflate_long(rsa.sign(self._pkcs1imify(digest), '')[0], 0)
+ m = Message()
+ m.add_string('ssh-rsa')
+ m.add_string(sig)
+ return m
+
+ def verify_ssh_sig(self, data, msg):
+ if msg.get_string() != 'ssh-rsa':
+ return False
+ sig = util.inflate_long(msg.get_string(), True)
+ # verify the signature by SHA'ing the data and encrypting it using the
+ # public key. some wackiness ensues where we "pkcs1imify" the 20-byte
+ # hash into a string as long as the RSA key.
+ hash = util.inflate_long(self._pkcs1imify(SHA.new(data).digest()), True)
+ rsa = RSA.construct((long(self.n), long(self.e)))
+ return rsa.verify(hash, (sig,))
+
+ def write_private_key_file(self, filename, password=None):
+ keylist = [ 0, self.n, self.e, self.d, self.p, self.q,
+ self.d % (self.p - 1), self.d % (self.q - 1),
+ util.mod_inverse(self.q, self.p) ]
+ try:
+ b = BER()
+ b.encode(keylist)
+ except BERException:
+ raise SSHException('Unable to create ber encoding of key')
+ self._write_private_key_file('RSA', filename, str(b), password)
+
+ def generate(bits, progress_func=None):
+ """
+ Generate a new private RSA key. This factory function can be used to
+ generate a new host key or authentication key.
+
+ @param bits: number of bits the generated key should be.
+ @type bits: int
+ @param progress_func: an optional function to call at key points in
+ key generation (used by C{pyCrypto.PublicKey}).
+ @type progress_func: function
+ @return: new private key
+ @rtype: L{RSAKey}
+
+ @since: fearow
+ """
+ randpool.stir()
+ rsa = RSA.generate(bits, randpool.get_bytes, progress_func)
+ key = RSAKey(vals=(rsa.e, rsa.n))
+ key.d = rsa.d
+ key.p = rsa.p
+ key.q = rsa.q
+ return key
+ generate = staticmethod(generate)
+
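+    # Illustrative usage sketch (bit size, path and password are assumed):
+    #
+    #     key = RSAKey.generate(1024)
+    #     key.write_private_key_file('/tmp/test_rsa.key', password='secret')
+    #     print key.get_base64()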
+
+ ### internals...
+
+
+ def _pkcs1imify(self, data):
+ """
+ turn a 20-byte SHA1 hash into a blob of data as large as the key's N,
+ using PKCS1's \"emsa-pkcs1-v1_5\" encoding. totally bizarre.
+ """
+ SHA1_DIGESTINFO = '\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'
+ size = len(util.deflate_long(self.n, 0))
+ filler = '\xff' * (size - len(SHA1_DIGESTINFO) - len(data) - 3)
+ return '\x00\x01' + filler + '\x00' + SHA1_DIGESTINFO + data
+
+ def _from_private_key_file(self, filename, password):
+ # private key file contains:
+ # RSAPrivateKey = { version = 0, n, e, d, p, q, d mod p-1, d mod q-1, q**-1 mod p }
+ data = self._read_private_key_file('RSA', filename, password)
+ try:
+ keylist = BER(data).decode()
+ except BERException:
+ raise SSHException('Unable to parse key file')
+        if (type(keylist) is not list) or (len(keylist) < 6) or (keylist[0] != 0):
+ raise SSHException('Not a valid RSA private key file (bad ber encoding)')
+ self.n = keylist[1]
+ self.e = keylist[2]
+ self.d = keylist[3]
+ # not really needed
+ self.p = keylist[4]
+ self.q = keylist[5]
+ self.size = util.bit_length(self.n)
diff --git a/paramiko/server.py b/paramiko/server.py
new file mode 100644
index 0000000..a0e3988
--- /dev/null
+++ b/paramiko/server.py
@@ -0,0 +1,527 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+L{ServerInterface} is an interface to override for server support.
+"""
+
+import threading
+from paramiko.common import *
+from paramiko import util
+
+
+class InteractiveQuery (object):
+ """
+ A query (set of prompts) for a user during interactive authentication.
+ """
+
+ def __init__(self, name='', instructions='', *prompts):
+ """
+ Create a new interactive query to send to the client. The name and
+ instructions are optional, but are generally displayed to the end
+ user. A list of prompts may be included, or they may be added via
+ the L{add_prompt} method.
+
+ @param name: name of this query
+ @type name: str
+ @param instructions: user instructions (usually short) about this query
+ @type instructions: str
+ """
+ self.name = name
+ self.instructions = instructions
+ self.prompts = []
+ for x in prompts:
+ if (type(x) is str) or (type(x) is unicode):
+ self.add_prompt(x)
+ else:
+ self.add_prompt(x[0], x[1])
+
+ def add_prompt(self, prompt, echo=True):
+ """
+ Add a prompt to this query. The prompt should be a (reasonably short)
+ string. Multiple prompts can be added to the same query.
+
+ @param prompt: the user prompt
+ @type prompt: str
+ @param echo: C{True} (default) if the user's response should be echoed;
+ C{False} if not (for a password or similar)
+ @type echo: bool
+ """
+ self.prompts.append((prompt, echo))
+
+
+class ServerInterface (object):
+ """
+ This class defines an interface for controlling the behavior of paramiko
+ in server mode.
+
+ Methods on this class are called from paramiko's primary thread, so you
+ shouldn't do too much work in them. (Certainly nothing that blocks or
+ sleeps.)
+ """
+
+ def check_channel_request(self, kind, chanid):
+ """
+ Determine if a channel request of a given type will be granted, and
+ return C{OPEN_SUCCEEDED} or an error code. This method is
+ called in server mode when the client requests a channel, after
+ authentication is complete.
+
+ If you allow channel requests (and an ssh server that didn't would be
+ useless), you should also override some of the channel request methods
+ below, which are used to determine which services will be allowed on
+ a given channel:
+ - L{check_channel_pty_request}
+ - L{check_channel_shell_request}
+ - L{check_channel_subsystem_request}
+ - L{check_channel_window_change_request}
+
+ The C{chanid} parameter is a small number that uniquely identifies the
+ channel within a L{Transport}. A L{Channel} object is not created
+ unless this method returns C{OPEN_SUCCEEDED} -- once a
+ L{Channel} object is created, you can call L{Channel.get_id} to
+ retrieve the channel ID.
+
+ The return value should either be C{OPEN_SUCCEEDED} (or
+ C{0}) to allow the channel request, or one of the following error
+ codes to reject it:
+ - C{OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED}
+ - C{OPEN_FAILED_CONNECT_FAILED}
+ - C{OPEN_FAILED_UNKNOWN_CHANNEL_TYPE}
+ - C{OPEN_FAILED_RESOURCE_SHORTAGE}
+
+ The default implementation always returns
+ C{OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED}.
+
+ @param kind: the kind of channel the client would like to open
+ (usually C{"session"}).
+ @type kind: str
+ @param chanid: ID of the channel
+ @type chanid: int
+ @return: a success or failure code (listed above)
+ @rtype: int
+ """
+ return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
+
+ def get_allowed_auths(self, username):
+ """
+ Return a list of authentication methods supported by the server.
+ This list is sent to clients attempting to authenticate, to inform them
+ of authentication methods that might be successful.
+
+ The "list" is actually a string of comma-separated names of types of
+ authentication. Possible values are C{"password"}, C{"publickey"},
+ and C{"none"}.
+
+ The default implementation always returns C{"password"}.
+
+ @param username: the username requesting authentication.
+ @type username: str
+ @return: a comma-separated list of authentication types
+ @rtype: str
+ """
+ return 'password'
+
+ def check_auth_none(self, username):
+ """
+ Determine if a client may open channels with no (further)
+ authentication.
+
+ Return L{AUTH_FAILED} if the client must authenticate, or
+ L{AUTH_SUCCESSFUL} if it's okay for the client to not
+ authenticate.
+
+ The default implementation always returns L{AUTH_FAILED}.
+
+ @param username: the username of the client.
+ @type username: str
+ @return: L{AUTH_FAILED} if the authentication fails;
+ L{AUTH_SUCCESSFUL} if it succeeds.
+ @rtype: int
+ """
+ return AUTH_FAILED
+
+ def check_auth_password(self, username, password):
+ """
+ Determine if a given username and password supplied by the client is
+ acceptable for use in authentication.
+
+ Return L{AUTH_FAILED} if the password is not accepted,
+ L{AUTH_SUCCESSFUL} if the password is accepted and completes
+ the authentication, or L{AUTH_PARTIALLY_SUCCESSFUL} if your
+        authentication is stateful, and this password is accepted for
+ authentication, but more authentication is required. (In this latter
+ case, L{get_allowed_auths} will be called to report to the client what
+ options it has for continuing the authentication.)
+
+ The default implementation always returns L{AUTH_FAILED}.
+
+ @param username: the username of the authenticating client.
+ @type username: str
+ @param password: the password given by the client.
+ @type password: str
+ @return: L{AUTH_FAILED} if the authentication fails;
+ L{AUTH_SUCCESSFUL} if it succeeds;
+ L{AUTH_PARTIALLY_SUCCESSFUL} if the password auth is
+ successful, but authentication must continue.
+ @rtype: int
+ """
+ return AUTH_FAILED
+
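+    # Illustrative sketch: a subclass accepting a single hard-coded account
+    # (the credentials are assumed purely for the example):
+    #
+    #     class SingleUserServer (ServerInterface):
+    #         def get_allowed_auths(self, username):
+    #             return 'password'
+    #         def check_auth_password(self, username, password):
+    #             if (username == 'robey') and (password == 'sesame'):
+    #                 return AUTH_SUCCESSFUL
+    #             return AUTH_FAILED
+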
+ def check_auth_publickey(self, username, key):
+ """
+ Determine if a given key supplied by the client is acceptable for use
+ in authentication. You should override this method in server mode to
+ check the username and key and decide if you would accept a signature
+ made using this key.
+
+ Return L{AUTH_FAILED} if the key is not accepted,
+ L{AUTH_SUCCESSFUL} if the key is accepted and completes the
+ authentication, or L{AUTH_PARTIALLY_SUCCESSFUL} if your
+        authentication is stateful, and this key is accepted for
+ authentication, but more authentication is required. (In this latter
+ case, L{get_allowed_auths} will be called to report to the client what
+ options it has for continuing the authentication.)
+
+        Note that you don't have to actually verify any key signature here.
+ If you're willing to accept the key, paramiko will do the work of
+ verifying the client's signature.
+
+ The default implementation always returns L{AUTH_FAILED}.
+
+ @param username: the username of the authenticating client
+ @type username: str
+ @param key: the key object provided by the client
+ @type key: L{PKey <pkey.PKey>}
+ @return: L{AUTH_FAILED} if the client can't authenticate
+ with this key; L{AUTH_SUCCESSFUL} if it can;
+ L{AUTH_PARTIALLY_SUCCESSFUL} if it can authenticate with
+ this key but must continue with authentication
+ @rtype: int
+ """
+ return AUTH_FAILED
+
+ def check_auth_interactive(self, username, submethods):
+ """
+ Begin an interactive authentication challenge, if supported. You
+ should override this method in server mode if you want to support the
+ C{"keyboard-interactive"} auth type, which requires you to send a
+ series of questions for the client to answer.
+
+ Return L{AUTH_FAILED} if this auth method isn't supported. Otherwise,
+ you should return an L{InteractiveQuery} object containing the prompts
+ and instructions for the user. The response will be sent via a call
+ to L{check_auth_interactive_response}.
+
+ The default implementation always returns L{AUTH_FAILED}.
+
+ @param username: the username of the authenticating client
+ @type username: str
+ @param submethods: a comma-separated list of methods preferred by the
+ client (usually empty)
+ @type submethods: str
+ @return: L{AUTH_FAILED} if this auth method isn't supported; otherwise
+ an object containing queries for the user
+ @rtype: int or L{InteractiveQuery}
+ """
+ return AUTH_FAILED
+
+ def check_auth_interactive_response(self, responses):
+ """
+ Continue or finish an interactive authentication challenge, if
+ supported. You should override this method in server mode if you want
+ to support the C{"keyboard-interactive"} auth type.
+
+ Return L{AUTH_FAILED} if the responses are not accepted,
+ L{AUTH_SUCCESSFUL} if the responses are accepted and complete
+ the authentication, or L{AUTH_PARTIALLY_SUCCESSFUL} if your
+ authentication is stateful, and this set of responses is accepted for
+ authentication, but more authentication is required. (In this latter
+ case, L{get_allowed_auths} will be called to report to the client what
+ options it has for continuing the authentication.)
+
+ If you wish to continue interactive authentication with more questions,
+ you may return an L{InteractiveQuery} object, which should cause the
+ client to respond with more answers, calling this method again. This
+ cycle can continue indefinitely.
+
+ The default implementation always returns L{AUTH_FAILED}.
+
+ @param responses: list of responses from the client
+ @type responses: list(str)
+ @return: L{AUTH_FAILED} if the authentication fails;
+ L{AUTH_SUCCESSFUL} if it succeeds;
+ L{AUTH_PARTIALLY_SUCCESSFUL} if the interactive auth is
+ successful, but authentication must continue; otherwise an object
+ containing queries for the user
+ @rtype: int or L{InteractiveQuery}
+ """
+ return AUTH_FAILED
+
+ def check_global_request(self, kind, msg):
+ """
+ Handle a global request of the given C{kind}. This method is called
+ in server mode and client mode, whenever the remote host makes a global
+ request. If there are any arguments to the request, they will be in
+ C{msg}.
+
+ There aren't any useful global requests defined, aside from port
+ forwarding, so usually this type of request is an extension to the
+ protocol.
+
+ If the request was successful and you would like to return contextual
+ data to the remote host, return a tuple. Items in the tuple will be
+ sent back with the successful result. (Note that the items in the
+ tuple can only be strings, ints, longs, or bools.)
+
+ The default implementation always returns C{False}, indicating that it
+ does not support any global requests.
+
+ @param kind: the kind of global request being made.
+ @type kind: str
+ @param msg: any extra arguments to the request.
+ @type msg: L{Message}
+ @return: C{True} or a tuple of data if the request was granted;
+ C{False} otherwise.
+ @rtype: bool
+ """
+ return False
+
+
+ ### Channel requests
+
+
+ def check_channel_pty_request(self, channel, term, width, height, pixelwidth, pixelheight,
+ modes):
+ """
+ Determine if a pseudo-terminal of the given dimensions (usually
+ requested for shell access) can be provided on the given channel.
+
+ The default implementation always returns C{False}.
+
+ @param channel: the L{Channel} the pty request arrived on.
+ @type channel: L{Channel}
+ @param term: type of terminal requested (for example, C{"vt100"}).
+ @type term: str
+ @param width: width of screen in characters.
+ @type width: int
+ @param height: height of screen in characters.
+ @type height: int
+ @param pixelwidth: width of screen in pixels, if known (may be C{0} if
+ unknown).
+ @type pixelwidth: int
+ @param pixelheight: height of screen in pixels, if known (may be C{0}
+ if unknown).
+ @type pixelheight: int
+        @return: C{True} if the pseudo-terminal has been allocated; C{False}
+ otherwise.
+ @rtype: bool
+ """
+ return False
+
+ def check_channel_shell_request(self, channel):
+ """
+ Determine if a shell will be provided to the client on the given
+ channel. If this method returns C{True}, the channel should be
+ connected to the stdin/stdout of a shell (or something that acts like
+ a shell).
+
+ The default implementation always returns C{False}.
+
+ @param channel: the L{Channel} the request arrived on.
+ @type channel: L{Channel}
+ @return: C{True} if this channel is now hooked up to a shell; C{False}
+ if a shell can't or won't be provided.
+ @rtype: bool
+ """
+ return False
+
+ def check_channel_exec_request(self, channel, command):
+ """
+ Determine if a shell command will be executed for the client. If this
+ method returns C{True}, the channel should be connected to the stdin,
+ stdout, and stderr of the shell command.
+
+ The default implementation always returns C{False}.
+
+ @param channel: the L{Channel} the request arrived on.
+ @type channel: L{Channel}
+ @param command: the command to execute.
+ @type command: str
+ @return: C{True} if this channel is now hooked up to the stdin,
+ stdout, and stderr of the executing command; C{False} if the
+ command will not be executed.
+ @rtype: bool
+
+ @since: 1.1
+ """
+ return False
+
+ def check_channel_subsystem_request(self, channel, name):
+ """
+ Determine if a requested subsystem will be provided to the client on
+ the given channel. If this method returns C{True}, all future I/O
+ through this channel will be assumed to be connected to the requested
+ subsystem. An example of a subsystem is C{sftp}.
+
+ The default implementation checks for a subsystem handler assigned via
+ L{Transport.set_subsystem_handler}.
+ If one has been set, the handler is invoked and this method returns
+ C{True}. Otherwise it returns C{False}.
+
+ @note: Because the default implementation uses the L{Transport} to
+ identify valid subsystems, you probably won't need to override this
+ method.
+
+ @param channel: the L{Channel} the pty request arrived on.
+ @type channel: L{Channel}
+ @param name: name of the requested subsystem.
+ @type name: str
+ @return: C{True} if this channel is now hooked up to the requested
+ subsystem; C{False} if that subsystem can't or won't be provided.
+ @rtype: bool
+ """
+ handler_class, larg, kwarg = channel.get_transport()._get_subsystem_handler(name)
+ if handler_class is None:
+ return False
+ handler = handler_class(channel, name, self, *larg, **kwarg)
+ handler.start()
+ return True
+
+ def check_channel_window_change_request(self, channel, width, height, pixelwidth, pixelheight):
+ """
+ Determine if the pseudo-terminal on the given channel can be resized.
+ This only makes sense if a pty was previously allocated on it.
+
+ The default implementation always returns C{False}.
+
+ @param channel: the L{Channel} the pty request arrived on.
+ @type channel: L{Channel}
+ @param width: width of screen in characters.
+ @type width: int
+ @param height: height of screen in characters.
+ @type height: int
+ @param pixelwidth: width of screen in pixels, if known (may be C{0} if
+ unknown).
+ @type pixelwidth: int
+ @param pixelheight: height of screen in pixels, if known (may be C{0}
+ if unknown).
+ @type pixelheight: int
+ @return: C{True} if the terminal was resized; C{False} if not.
+ @rtype: bool
+ """
+ return False
+
+
+class SubsystemHandler (threading.Thread):
+ """
+    Handler for a subsystem in server mode. If you create a subclass of this
+    class and pass it to L{Transport.set_subsystem_handler}, an object of this
+    class will be created for each request for this subsystem. Each new object
+ will be executed within its own new thread by calling L{start_subsystem}.
+ When that method completes, the channel is closed.
+
+ For example, if you made a subclass C{MP3Handler} and registered it as the
+ handler for subsystem C{"mp3"}, then whenever a client has successfully
+    authenticated and requests subsystem C{"mp3"}, an object of class
+ C{MP3Handler} will be created, and L{start_subsystem} will be called on
+ it from a new thread.
+
+ @since: ivysaur
+ """
+ def __init__(self, channel, name, server):
+ """
+ Create a new handler for a channel. This is used by L{ServerInterface}
+ to start up a new handler when a channel requests this subsystem. You
+ don't need to override this method, but if you do, be sure to pass the
+ C{channel} and C{name} parameters through to the original C{__init__}
+ method here.
+
+ @param channel: the channel associated with this subsystem request.
+ @type channel: L{Channel}
+ @param name: name of the requested subsystem.
+ @type name: str
+ @param server: the server object for the session that started this
+ subsystem
+ @type server: L{ServerInterface}
+ """
+ threading.Thread.__init__(self, target=self._run)
+ self.__channel = channel
+ self.__transport = channel.get_transport()
+ self.__name = name
+ self.__server = server
+
+ def get_server(self):
+ """
+ Return the L{ServerInterface} object associated with this channel and
+ subsystem.
+
+ @rtype: L{ServerInterface}
+ """
+ return self.__server
+
+ def _run(self):
+ try:
+ self.__transport._log(DEBUG, 'Starting handler for subsystem %s' % self.__name)
+ self.start_subsystem(self.__name, self.__transport, self.__channel)
+ except Exception, e:
+ self.__transport._log(ERROR, 'Exception in subsystem handler for "%s": %s' %
+ (self.__name, str(e)))
+ self.__transport._log(ERROR, util.tb_strings())
+ try:
+ self.finish_subsystem()
+ except:
+ pass
+
+ def start_subsystem(self, name, transport, channel):
+ """
+ Process an ssh subsystem in server mode. This method is called on a
+ new object (and in a new thread) for each subsystem request. It is
+ assumed that all subsystem logic will take place here, and when the
+ subsystem is finished, this method will return. After this method
+ returns, the channel is closed.
+
+ The combination of C{transport} and C{channel} are unique; this handler
+ corresponds to exactly one L{Channel} on one L{Transport}.
+
+ @note: It is the responsibility of this method to exit if the
+ underlying L{Transport} is closed. This can be done by checking
+ L{Transport.is_active} or noticing an EOF
+ on the L{Channel}. If this method loops forever without checking
+ for this case, your python interpreter may refuse to exit because
+ this thread will still be running.
+
+ @param name: name of the requested subsystem.
+ @type name: str
+ @param transport: the server-mode L{Transport}.
+ @type transport: L{Transport}
+ @param channel: the channel associated with this subsystem request.
+ @type channel: L{Channel}
+ """
+ pass
+
+ def finish_subsystem(self):
+ """
+ Perform any cleanup at the end of a subsystem. The default
+ implementation just closes the channel.
+
+ @since: 1.1
+ """
+ self.__channel.close()
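+
+# Illustrative sketch: registering a SubsystemHandler subclass so that client
+# requests for the "mp3" subsystem spawn a handler thread (the MP3Handler
+# class name follows the docstring's example and is assumed):
+#
+#     transport.set_subsystem_handler('mp3', MP3Handler)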
diff --git a/paramiko/sftp.py b/paramiko/sftp.py
new file mode 100644
index 0000000..58d7103
--- /dev/null
+++ b/paramiko/sftp.py
@@ -0,0 +1,168 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+import socket
+import struct
+
+from paramiko.common import *
+from paramiko import util
+from paramiko.channel import Channel
+from paramiko.message import Message
+
+
+CMD_INIT, CMD_VERSION, CMD_OPEN, CMD_CLOSE, CMD_READ, CMD_WRITE, CMD_LSTAT, CMD_FSTAT, \
+ CMD_SETSTAT, CMD_FSETSTAT, CMD_OPENDIR, CMD_READDIR, CMD_REMOVE, CMD_MKDIR, \
+ CMD_RMDIR, CMD_REALPATH, CMD_STAT, CMD_RENAME, CMD_READLINK, CMD_SYMLINK \
+ = range(1, 21)
+CMD_STATUS, CMD_HANDLE, CMD_DATA, CMD_NAME, CMD_ATTRS = range(101, 106)
+CMD_EXTENDED, CMD_EXTENDED_REPLY = range(200, 202)
+
+SFTP_OK = 0
+SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE, SFTP_BAD_MESSAGE, \
+ SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST, SFTP_OP_UNSUPPORTED = range(1, 9)
+
+SFTP_DESC = [ 'Success',
+ 'End of file',
+ 'No such file',
+ 'Permission denied',
+ 'Failure',
+ 'Bad message',
+ 'No connection',
+ 'Connection lost',
+ 'Operation unsupported' ]
+
+SFTP_FLAG_READ = 0x1
+SFTP_FLAG_WRITE = 0x2
+SFTP_FLAG_APPEND = 0x4
+SFTP_FLAG_CREATE = 0x8
+SFTP_FLAG_TRUNC = 0x10
+SFTP_FLAG_EXCL = 0x20
+
+_VERSION = 3
+
+
+# for debugging
+CMD_NAMES = {
+ CMD_INIT: 'init',
+ CMD_VERSION: 'version',
+ CMD_OPEN: 'open',
+ CMD_CLOSE: 'close',
+ CMD_READ: 'read',
+ CMD_WRITE: 'write',
+ CMD_LSTAT: 'lstat',
+ CMD_FSTAT: 'fstat',
+ CMD_SETSTAT: 'setstat',
+ CMD_FSETSTAT: 'fsetstat',
+ CMD_OPENDIR: 'opendir',
+ CMD_READDIR: 'readdir',
+ CMD_REMOVE: 'remove',
+ CMD_MKDIR: 'mkdir',
+ CMD_RMDIR: 'rmdir',
+ CMD_REALPATH: 'realpath',
+ CMD_STAT: 'stat',
+ CMD_RENAME: 'rename',
+ CMD_READLINK: 'readlink',
+ CMD_SYMLINK: 'symlink',
+ CMD_STATUS: 'status',
+ CMD_HANDLE: 'handle',
+ CMD_DATA: 'data',
+ CMD_NAME: 'name',
+ CMD_ATTRS: 'attrs',
+ CMD_EXTENDED: 'extended',
+ CMD_EXTENDED_REPLY: 'extended_reply'
+ }
+
+
+class SFTPError (Exception):
+ pass
+
+
+class BaseSFTP (object):
+ def __init__(self):
+ self.logger = util.get_logger('paramiko.sftp')
+ self.sock = None
+ self.ultra_debug = False
+
+
+ ### internals...
+
+
+ def _send_version(self):
+ self._send_packet(CMD_INIT, struct.pack('>I', _VERSION))
+ t, data = self._read_packet()
+ if t != CMD_VERSION:
+ raise SFTPError('Incompatible sftp protocol')
+ version = struct.unpack('>I', data[:4])[0]
+ # if version != _VERSION:
+ # raise SFTPError('Incompatible sftp protocol')
+ return version
+
+ def _send_server_version(self):
+ # advertise that we support "check-file"
+ extension_pairs = [ 'check-file', 'md5,sha1' ]
+ msg = Message()
+ msg.add_int(_VERSION)
+ msg.add(*extension_pairs)
+ self._send_packet(CMD_VERSION, str(msg))
+ t, data = self._read_packet()
+ if t != CMD_INIT:
+ raise SFTPError('Incompatible sftp protocol')
+ version = struct.unpack('>I', data[:4])[0]
+ return version
+
+ def _log(self, level, msg):
+ if issubclass(type(msg), list):
+ for m in msg:
+ self.logger.log(level, m)
+ else:
+ self.logger.log(level, msg)
+
+ def _write_all(self, out):
+ while len(out) > 0:
+ n = self.sock.send(out)
+ if n <= 0:
+ raise EOFError()
+ if n == len(out):
+ return
+ out = out[n:]
+ return
+
+ def _read_all(self, n):
+ out = ''
+ while n > 0:
+ x = self.sock.recv(n)
+ if len(x) == 0:
+ raise EOFError()
+ out += x
+ n -= len(x)
+ return out
+
+ def _send_packet(self, t, packet):
+ out = struct.pack('>I', len(packet) + 1) + chr(t) + packet
+ if self.ultra_debug:
+ self._log(DEBUG, util.format_binary(out, 'OUT: '))
+ self._write_all(out)
+
+ def _read_packet(self):
+ size = struct.unpack('>I', self._read_all(4))[0]
+ data = self._read_all(size)
+ if self.ultra_debug:
+            self._log(DEBUG, util.format_binary(data, 'IN: '))
+ if size > 0:
+ return ord(data[0]), data[1:]
+ return 0, ''
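+
+    # For reference, every packet sent or received above is framed as:
+    #
+    #     uint32    length   (of everything after this field)
+    #     byte      type     (one of the CMD_* constants)
+    #     byte[]    payload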
diff --git a/paramiko/sftp_attr.py b/paramiko/sftp_attr.py
new file mode 100644
index 0000000..eae7c99
--- /dev/null
+++ b/paramiko/sftp_attr.py
@@ -0,0 +1,208 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+import stat
+import time
+from paramiko.common import *
+from paramiko.sftp import *
+
+
+class SFTPAttributes (object):
+ """
+ Representation of the attributes of a file (or proxied file) for SFTP in
+    client or server mode. It attempts to mirror the object returned by
+ C{os.stat} as closely as possible, so it may have the following fields,
+ with the same meanings as those returned by an C{os.stat} object:
+ - st_size
+ - st_uid
+ - st_gid
+ - st_mode
+ - st_atime
+ - st_mtime
+
+    Because SFTP also allows files to carry arbitrary extended (named)
+    attributes, these are stored in a dict named C{attr}. Occasionally, the
+    filename is also stored, in C{filename}.
+ """
+
+ FLAG_SIZE = 1
+ FLAG_UIDGID = 2
+ FLAG_PERMISSIONS = 4
+ FLAG_AMTIME = 8
+ FLAG_EXTENDED = 0x80000000L
+
+ def __init__(self):
+ """
+ Create a new (empty) SFTPAttributes object. All fields will be empty.
+ """
+ self._flags = 0
+ self.attr = {}
+
+ def from_stat(cls, obj, filename=None):
+ """
+ Create an SFTPAttributes object from an existing C{stat} object (an
+ object returned by C{os.stat}).
+
+ @param obj: an object returned by C{os.stat} (or equivalent).
+ @type obj: object
+ @param filename: the filename associated with this file.
+ @type filename: str
+ @return: new L{SFTPAttributes} object with the same attribute fields.
+ @rtype: L{SFTPAttributes}
+ """
+ attr = cls()
+ attr.st_size = obj.st_size
+ attr.st_uid = obj.st_uid
+ attr.st_gid = obj.st_gid
+ attr.st_mode = obj.st_mode
+ attr.st_atime = obj.st_atime
+ attr.st_mtime = obj.st_mtime
+ if filename is not None:
+ attr.filename = filename
+ return attr
+ from_stat = classmethod(from_stat)
+
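+    # Editor's illustrative sketch (not part of the upstream module): a
+    # server implementation would typically build one of these objects from
+    # a local os.stat() result; the path below is hypothetical.
+    #
+    #     import os
+    #     st = os.stat('/tmp/example.txt')
+    #     attr = SFTPAttributes.from_stat(st, filename='example.txt')
+    #     print str(attr)    # renders an ls -l style line (see __str__ below)
+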
+ def __repr__(self):
+ return '<SFTPAttributes: %s>' % self._debug_str()
+
+    def __str__(self):
+        # NOTE: this debug-oriented definition is shadowed by the ls-style
+        # __str__ defined further down in this class.
+        return self._debug_str()
+
+
+ ### internals...
+
+
+ def _from_msg(cls, msg, filename=None):
+ attr = cls()
+ attr._unpack(msg)
+ if filename is not None:
+ attr.filename = filename
+ return attr
+ _from_msg = classmethod(_from_msg)
+
+ def _unpack(self, msg):
+ self._flags = msg.get_int()
+ if self._flags & self.FLAG_SIZE:
+ self.st_size = msg.get_int64()
+ if self._flags & self.FLAG_UIDGID:
+ self.st_uid = msg.get_int()
+ self.st_gid = msg.get_int()
+ if self._flags & self.FLAG_PERMISSIONS:
+ self.st_mode = msg.get_int()
+ if self._flags & self.FLAG_AMTIME:
+ self.st_atime = msg.get_int()
+ self.st_mtime = msg.get_int()
+ if self._flags & self.FLAG_EXTENDED:
+ count = msg.get_int()
+ for i in range(count):
+ self.attr[msg.get_string()] = msg.get_string()
+
+ def _pack(self, msg):
+ self._flags = 0
+ if hasattr(self, 'st_size'):
+ self._flags |= self.FLAG_SIZE
+ if hasattr(self, 'st_uid') or hasattr(self, 'st_gid'):
+ self._flags |= self.FLAG_UIDGID
+ if hasattr(self, 'st_mode'):
+ self._flags |= self.FLAG_PERMISSIONS
+ if hasattr(self, 'st_atime') or hasattr(self, 'st_mtime'):
+ self._flags |= self.FLAG_AMTIME
+ if len(self.attr) > 0:
+ self._flags |= self.FLAG_EXTENDED
+ msg.add_int(self._flags)
+ if self._flags & self.FLAG_SIZE:
+ msg.add_int64(self.st_size)
+ if self._flags & self.FLAG_UIDGID:
+ msg.add_int(getattr(self, 'st_uid', 0))
+ msg.add_int(getattr(self, 'st_gid', 0))
+ if self._flags & self.FLAG_PERMISSIONS:
+ msg.add_int(self.st_mode)
+ if self._flags & self.FLAG_AMTIME:
+ msg.add_int(getattr(self, 'st_atime', 0))
+ msg.add_int(getattr(self, 'st_mtime', 0))
+ if self._flags & self.FLAG_EXTENDED:
+ msg.add_int(len(self.attr))
+ for key, val in self.attr.iteritems():
+ msg.add_string(key)
+ msg.add_string(val)
+ return
+
+ def _debug_str(self):
+ out = '[ '
+ if hasattr(self, 'st_size'):
+ out += 'size=%d ' % self.st_size
+ if hasattr(self, 'st_uid') or hasattr(self, 'st_gid'):
+ out += 'uid=%d gid=%d ' % (getattr(self, 'st_uid', 0), getattr(self, 'st_gid', 0))
+ if hasattr(self, 'st_mode'):
+ out += 'mode=' + oct(self.st_mode) + ' '
+ if hasattr(self, 'st_atime') or hasattr(self, 'st_mtime'):
+ out += 'atime=%d mtime=%d ' % (getattr(self, 'st_atime', 0),
+ getattr(self, 'st_mtime', 0))
+ for k, v in self.attr.iteritems():
+ out += '"%s"=%r ' % (str(k), v)
+ out += ']'
+ return out
+
+ def _rwx(n, suid, sticky=False):
+ if suid:
+ suid = 2
+ out = '-r'[n >> 2] + '-w'[(n >> 1) & 1]
+ if sticky:
+ out += '-xTt'[suid + (n & 1)]
+ else:
+ out += '-xSs'[suid + (n & 1)]
+ return out
+ _rwx = staticmethod(_rwx)
+
+ def __str__(self):
+ "create a unix-style long description of the file (like ls -l)"
+ if hasattr(self, 'st_mode'):
+ kind = stat.S_IFMT(self.st_mode)
+ if kind == stat.S_IFIFO:
+ ks = 'p'
+ elif kind == stat.S_IFCHR:
+ ks = 'c'
+ elif kind == stat.S_IFDIR:
+ ks = 'd'
+ elif kind == stat.S_IFBLK:
+ ks = 'b'
+ elif kind == stat.S_IFREG:
+ ks = '-'
+ elif kind == stat.S_IFLNK:
+ ks = 'l'
+ elif kind == stat.S_IFSOCK:
+ ks = 's'
+ else:
+ ks = '?'
+ ks += self._rwx((self.st_mode & 0700) >> 6, self.st_mode & stat.S_ISUID)
+ ks += self._rwx((self.st_mode & 070) >> 3, self.st_mode & stat.S_ISGID)
+ ks += self._rwx(self.st_mode & 7, self.st_mode & stat.S_ISVTX, True)
+ else:
+ ks = '?---------'
+ uid = getattr(self, 'st_uid', -1)
+ gid = getattr(self, 'st_gid', -1)
+ size = getattr(self, 'st_size', -1)
+ mtime = getattr(self, 'st_mtime', 0)
+ # compute display date
+ if abs(time.time() - mtime) > 15552000:
+ # (15552000 = 6 months)
+ datestr = time.strftime('%d %b %Y', time.localtime(mtime))
+ else:
+ datestr = time.strftime('%d %b %H:%M', time.localtime(mtime))
+ filename = getattr(self, 'filename', '?')
+ return '%s 1 %-8d %-8d %8d %-12s %s' % (ks, uid, gid, size, datestr, filename)
diff --git a/paramiko/sftp_client.py b/paramiko/sftp_client.py
new file mode 100644
index 0000000..2fe89e9
--- /dev/null
+++ b/paramiko/sftp_client.py
@@ -0,0 +1,618 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Client-mode SFTP support.
+"""
+
+import errno
+import os
+import threading
+import time
+import weakref
+from paramiko.sftp import *
+from paramiko.sftp_attr import SFTPAttributes
+from paramiko.sftp_file import SFTPFile
+
+
+def _to_unicode(s):
+    "if a str is not ascii, decode its utf-8 into unicode"
+    try:
+        return s.encode('ascii')
+    except UnicodeError:
+        return s.decode('utf-8')
+
+
+class SFTPClient (BaseSFTP):
+ """
+ SFTP client object. C{SFTPClient} is used to open an sftp session across
+ an open ssh L{Transport} and do remote file operations.
+ """
+
+ def __init__(self, sock):
+ """
+ Create an SFTP client from an existing L{Channel}. The channel
+ should already have requested the C{"sftp"} subsystem.
+
+ An alternate way to create an SFTP client context is by using
+ L{from_transport}.
+
+ @param sock: an open L{Channel} using the C{"sftp"} subsystem.
+ @type sock: L{Channel}
+ """
+ BaseSFTP.__init__(self)
+ self.sock = sock
+ self.ultra_debug = False
+ self.request_number = 1
+ # lock for request_number
+ self._lock = threading.Lock()
+ self._cwd = None
+ # request # -> SFTPFile
+ self._expecting = weakref.WeakValueDictionary()
+ if type(sock) is Channel:
+ # override default logger
+ transport = self.sock.get_transport()
+ self.logger = util.get_logger(transport.get_log_channel() + '.' +
+ self.sock.get_name() + '.sftp')
+ self.ultra_debug = transport.get_hexdump()
+ self._send_version()
+
+ def __del__(self):
+ self.close()
+
+ def from_transport(selfclass, t):
+ """
+ Create an SFTP client channel from an open L{Transport}.
+
+ @param t: an open L{Transport} which is already authenticated.
+ @type t: L{Transport}
+ @return: a new L{SFTPClient} object, referring to an sftp session
+ (channel) across the transport.
+ @rtype: L{SFTPClient}
+ """
+ chan = t.open_session()
+ if chan is None:
+ return None
+ if not chan.invoke_subsystem('sftp'):
+ raise SFTPError('Failed to invoke sftp subsystem')
+ return selfclass(chan)
+ from_transport = classmethod(from_transport)
+
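+    # Illustrative usage sketch (editor's addition, assuming the top-level
+    # names exported by paramiko/__init__.py; the host and credentials are
+    # placeholders):
+    #
+    #     import socket
+    #     import paramiko
+    #
+    #     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    #     sock.connect(('sftp.example.com', 22))
+    #     t = paramiko.Transport(sock)
+    #     t.connect(username='user', password='secret')
+    #     sftp = paramiko.SFTPClient.from_transport(t)
+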
+ def close(self):
+ """
+ Close the SFTP session and its underlying channel.
+
+ @since: 1.4
+ """
+ self.sock.close()
+
+ def listdir(self, path='.'):
+ """
+ Return a list containing the names of the entries in the given C{path}.
+ The list is in arbitrary order. It does not include the special
+ entries C{'.'} and C{'..'} even if they are present in the folder.
+ This method is meant to mirror C{os.listdir} as closely as possible.
+ For a list of full L{SFTPAttributes} objects, see L{listdir_attr}.
+
+ @param path: path to list (defaults to C{'.'})
+ @type path: str
+ @return: list of filenames
+ @rtype: list of str
+ """
+ return [f.filename for f in self.listdir_attr(path)]
+
+ def listdir_attr(self, path='.'):
+ """
+ Return a list containing L{SFTPAttributes} objects corresponding to
+ files in the given C{path}. The list is in arbitrary order. It does
+ not include the special entries C{'.'} and C{'..'} even if they are
+ present in the folder.
+
+ @param path: path to list (defaults to C{'.'})
+ @type path: str
+ @return: list of attributes
+ @rtype: list of L{SFTPAttributes}
+
+ @since: 1.2
+ """
+ path = self._adjust_cwd(path)
+ t, msg = self._request(CMD_OPENDIR, path)
+ if t != CMD_HANDLE:
+ raise SFTPError('Expected handle')
+ handle = msg.get_string()
+ filelist = []
+ while True:
+ try:
+ t, msg = self._request(CMD_READDIR, handle)
+ except EOFError, e:
+ # done with handle
+ break
+ if t != CMD_NAME:
+ raise SFTPError('Expected name response')
+ count = msg.get_int()
+ for i in range(count):
+ filename = _to_unicode(msg.get_string())
+ longname = _to_unicode(msg.get_string())
+ attr = SFTPAttributes._from_msg(msg, filename)
+ if (filename != '.') and (filename != '..'):
+ filelist.append(attr)
+ self._request(CMD_CLOSE, handle)
+ return filelist
+
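+    # Brief sketch (editor's illustration): with an SFTPClient `sftp` as in
+    # the from_transport() example above, both calls use the same protocol
+    # exchange; listdir_attr() keeps the per-entry SFTPAttributes.
+    #
+    #     for name in sftp.listdir('.'):
+    #         print name
+    #     for attr in sftp.listdir_attr('.'):
+    #         print attr.filename, getattr(attr, 'st_size', -1)
+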
+ def file(self, filename, mode='r', bufsize=-1):
+ """
+ Open a file on the remote server. The arguments are the same as for
+ python's built-in C{file} (aka C{open}). A file-like object is
+ returned, which closely mimics the behavior of a normal python file
+ object.
+
+ The mode indicates how the file is to be opened: C{'r'} for reading,
+ C{'w'} for writing (truncating an existing file), C{'a'} for appending,
+ C{'r+'} for reading/writing, C{'w+'} for reading/writing (truncating an
+ existing file), C{'a+'} for reading/appending. The python C{'b'} flag
+ is ignored, since SSH treats all files as binary. The C{'U'} flag is
+ supported in a compatible way.
+
+ Since 1.5.2, an C{'x'} flag indicates that the operation should only
+ succeed if the file was created and did not previously exist. This has
+ no direct mapping to python's file flags, but is commonly known as the
+ C{O_EXCL} flag in posix.
+
+ The file will be buffered in standard python style by default, but
+ can be altered with the C{bufsize} parameter. C{0} turns off
+ buffering, C{1} uses line buffering, and any number greater than 1
+ (C{>1}) uses that specific buffer size.
+
+ @param filename: name of the file to open.
+ @type filename: string
+ @param mode: mode (python-style) to open in.
+ @type mode: string
+ @param bufsize: desired buffering (-1 = default buffer size)
+ @type bufsize: int
+ @return: a file object representing the open file.
+ @rtype: SFTPFile
+
+ @raise IOError: if the file could not be opened.
+ """
+ filename = self._adjust_cwd(filename)
+ imode = 0
+ if ('r' in mode) or ('+' in mode):
+ imode |= SFTP_FLAG_READ
+ if ('w' in mode) or ('+' in mode) or ('a' in mode):
+ imode |= SFTP_FLAG_WRITE
+ if ('w' in mode):
+ imode |= SFTP_FLAG_CREATE | SFTP_FLAG_TRUNC
+ if ('a' in mode):
+ imode |= SFTP_FLAG_CREATE | SFTP_FLAG_APPEND
+ if ('x' in mode):
+ imode |= SFTP_FLAG_CREATE | SFTP_FLAG_EXCL
+ attrblock = SFTPAttributes()
+ t, msg = self._request(CMD_OPEN, filename, imode, attrblock)
+ if t != CMD_HANDLE:
+ raise SFTPError('Expected handle')
+ handle = msg.get_string()
+ return SFTPFile(self, handle, mode, bufsize)
+
+ # python has migrated toward file() instead of open().
+ # and really, that's more easily identifiable.
+ open = file
+
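+    # Hedged example (editor's addition; the remote paths are made up): the
+    # mode strings map onto the SFTP open flags computed above.
+    #
+    #     f = sftp.open('/tmp/notes.txt', 'r')
+    #     data = f.read()
+    #     f.close()
+    #
+    #     f = sftp.open('/tmp/lockfile', 'wx')    # O_EXCL: fails if it exists
+    #     f.write('locked\n')
+    #     f.close()
+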
+ def remove(self, path):
+ """
+ Remove the file at the given path.
+
+ @param path: path (absolute or relative) of the file to remove.
+ @type path: string
+
+ @raise IOError: if the path refers to a folder (directory). Use
+ L{rmdir} to remove a folder.
+ """
+ path = self._adjust_cwd(path)
+ self._request(CMD_REMOVE, path)
+
+ unlink = remove
+
+ def rename(self, oldpath, newpath):
+ """
+ Rename a file or folder from C{oldpath} to C{newpath}.
+
+ @param oldpath: existing name of the file or folder.
+ @type oldpath: string
+ @param newpath: new name for the file or folder.
+ @type newpath: string
+
+ @raise IOError: if C{newpath} is a folder, or something else goes
+ wrong.
+ """
+ oldpath = self._adjust_cwd(oldpath)
+ newpath = self._adjust_cwd(newpath)
+ self._request(CMD_RENAME, oldpath, newpath)
+
+ def mkdir(self, path, mode=0777):
+ """
+ Create a folder (directory) named C{path} with numeric mode C{mode}.
+ The default mode is 0777 (octal). On some systems, mode is ignored.
+ Where it is used, the current umask value is first masked out.
+
+ @param path: name of the folder to create.
+ @type path: string
+ @param mode: permissions (posix-style) for the newly-created folder.
+ @type mode: int
+ """
+ path = self._adjust_cwd(path)
+ attr = SFTPAttributes()
+ attr.st_mode = mode
+ self._request(CMD_MKDIR, path, attr)
+
+ def rmdir(self, path):
+ """
+ Remove the folder named C{path}.
+
+ @param path: name of the folder to remove.
+ @type path: string
+ """
+ path = self._adjust_cwd(path)
+ self._request(CMD_RMDIR, path)
+
+ def stat(self, path):
+ """
+ Retrieve information about a file on the remote system. The return
+ value is an object whose attributes correspond to the attributes of
+ python's C{stat} structure as returned by C{os.stat}, except that it
+ contains fewer fields. An SFTP server may return as much or as little
+ info as it wants, so the results may vary from server to server.
+
+ Unlike a python C{stat} object, the result may not be accessed as a
+ tuple. This is mostly due to the author's slack factor.
+
+ The fields supported are: C{st_mode}, C{st_size}, C{st_uid}, C{st_gid},
+ C{st_atime}, and C{st_mtime}.
+
+ @param path: the filename to stat.
+ @type path: string
+ @return: an object containing attributes about the given file.
+ @rtype: SFTPAttributes
+ """
+ path = self._adjust_cwd(path)
+ t, msg = self._request(CMD_STAT, path)
+ if t != CMD_ATTRS:
+ raise SFTPError('Expected attributes')
+ return SFTPAttributes._from_msg(msg)
+
+ def lstat(self, path):
+ """
+ Retrieve information about a file on the remote system, without
+ following symbolic links (shortcuts). This otherwise behaves exactly
+ the same as L{stat}.
+
+ @param path: the filename to stat.
+ @type path: string
+ @return: an object containing attributes about the given file.
+ @rtype: SFTPAttributes
+ """
+ path = self._adjust_cwd(path)
+ t, msg = self._request(CMD_LSTAT, path)
+ if t != CMD_ATTRS:
+ raise SFTPError('Expected attributes')
+ return SFTPAttributes._from_msg(msg)
+
+ def symlink(self, source, dest):
+ """
+        Create a symbolic link (shortcut) of the C{source} path at
+        C{dest}.
+
+ @param source: path of the original file.
+ @type source: string
+ @param dest: path of the newly created symlink.
+ @type dest: string
+ """
+ dest = self._adjust_cwd(dest)
+ if type(source) is unicode:
+ source = source.encode('utf-8')
+ self._request(CMD_SYMLINK, source, dest)
+
+ def chmod(self, path, mode):
+ """
+ Change the mode (permissions) of a file. The permissions are
+ unix-style and identical to those used by python's C{os.chmod}
+ function.
+
+ @param path: path of the file to change the permissions of.
+ @type path: string
+ @param mode: new permissions.
+ @type mode: int
+ """
+ path = self._adjust_cwd(path)
+ attr = SFTPAttributes()
+ attr.st_mode = mode
+ self._request(CMD_SETSTAT, path, attr)
+
+ def chown(self, path, uid, gid):
+ """
+ Change the owner (C{uid}) and group (C{gid}) of a file. As with
+ python's C{os.chown} function, you must pass both arguments, so if you
+ only want to change one, use L{stat} first to retrieve the current
+ owner and group.
+
+ @param path: path of the file to change the owner and group of.
+ @type path: string
+ @param uid: new owner's uid
+ @type uid: int
+ @param gid: new group id
+ @type gid: int
+ """
+ path = self._adjust_cwd(path)
+ attr = SFTPAttributes()
+ attr.st_uid, attr.st_gid = uid, gid
+ self._request(CMD_SETSTAT, path, attr)
+
+ def utime(self, path, times):
+ """
+ Set the access and modified times of the file specified by C{path}. If
+ C{times} is C{None}, then the file's access and modified times are set
+ to the current time. Otherwise, C{times} must be a 2-tuple of numbers,
+ of the form C{(atime, mtime)}, which is used to set the access and
+ modified times, respectively. This bizarre API is mimicked from python
+ for the sake of consistency -- I apologize.
+
+ @param path: path of the file to modify.
+ @type path: string
+ @param times: C{None} or a tuple of (access time, modified time) in
+ standard internet epoch time (seconds since 01 January 1970 GMT).
+ @type times: tuple of int
+ """
+ path = self._adjust_cwd(path)
+ if times is None:
+ times = (time.time(), time.time())
+ attr = SFTPAttributes()
+ attr.st_atime, attr.st_mtime = times
+ self._request(CMD_SETSTAT, path, attr)
+
+ def readlink(self, path):
+ """
+ Return the target of a symbolic link (shortcut). You can use
+ L{symlink} to create these. The result may be either an absolute or
+ relative pathname.
+
+ @param path: path of the symbolic link file.
+ @type path: str
+ @return: target path.
+ @rtype: str
+ """
+ path = self._adjust_cwd(path)
+ t, msg = self._request(CMD_READLINK, path)
+ if t != CMD_NAME:
+ raise SFTPError('Expected name response')
+ count = msg.get_int()
+ if count == 0:
+ return None
+ if count != 1:
+ raise SFTPError('Readlink returned %d results' % count)
+ return _to_unicode(msg.get_string())
+
+ def normalize(self, path):
+ """
+ Return the normalized path (on the server) of a given path. This
+ can be used to quickly resolve symbolic links or determine what the
+ server is considering to be the "current folder" (by passing C{'.'}
+ as C{path}).
+
+ @param path: path to be normalized.
+ @type path: str
+ @return: normalized form of the given path.
+ @rtype: str
+
+ @raise IOError: if the path can't be resolved on the server
+ """
+ path = self._adjust_cwd(path)
+ t, msg = self._request(CMD_REALPATH, path)
+ if t != CMD_NAME:
+ raise SFTPError('Expected name response')
+ count = msg.get_int()
+ if count != 1:
+ raise SFTPError('Realpath returned %d results' % count)
+ return _to_unicode(msg.get_string())
+
+ def chdir(self, path):
+ """
+ Change the "current directory" of this SFTP session. Since SFTP
+ doesn't really have the concept of a current working directory, this
+ is emulated by paramiko. Once you use this method to set a working
+ directory, all operations on this SFTPClient object will be relative
+ to that path.
+
+ @param path: new current working directory
+ @type path: str
+
+ @raise IOError: if the requested path doesn't exist on the server
+
+ @since: 1.4
+ """
+ self._cwd = self.normalize(path)
+
+ def getcwd(self):
+ """
+ Return the "current working directory" for this SFTP session, as
+ emulated by paramiko. If no directory has been set with L{chdir},
+ this method will return C{None}.
+
+ @return: the current working directory on the server, or C{None}
+ @rtype: str
+
+ @since: 1.4
+ """
+ return self._cwd
+
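+    # Quick sketch (editor's illustration): once a working directory is set,
+    # relative paths passed to this client are resolved against it.
+    #
+    #     sftp.chdir('/tmp')
+    #     print sftp.getcwd()        # typically '/tmp'
+    #     sftp.mkdir('scratch')      # creates /tmp/scratch
+    #     print sftp.listdir('.')    # listing of /tmp
+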
+ def put(self, localpath, remotepath):
+ """
+ Copy a local file (C{localpath}) to the SFTP server as C{remotepath}.
+ Any exception raised by operations will be passed through. This
+ method is primarily provided as a convenience.
+
+ The SFTP operations use pipelining for speed.
+
+ @param localpath: the local file to copy
+ @type localpath: str
+ @param remotepath: the destination path on the SFTP server
+ @type remotepath: str
+
+ @since: 1.4
+ """
+ fl = file(localpath, 'rb')
+ fr = self.file(remotepath, 'wb')
+ fr.set_pipelined(True)
+ size = 0
+ while True:
+ data = fl.read(32768)
+ if len(data) == 0:
+ break
+ fr.write(data)
+ size += len(data)
+ fl.close()
+ fr.close()
+ s = self.stat(remotepath)
+ if s.st_size != size:
+ raise IOError('size mismatch in put! %d != %d' % (s.st_size, size))
+
+ def get(self, remotepath, localpath):
+ """
+ Copy a remote file (C{remotepath}) from the SFTP server to the local
+ host as C{localpath}. Any exception raised by operations will be
+ passed through. This method is primarily provided as a convenience.
+
+ @param remotepath: the remote file to copy
+ @type remotepath: str
+ @param localpath: the destination path on the local host
+ @type localpath: str
+
+ @since: 1.4
+ """
+ fr = self.file(remotepath, 'rb')
+ fr.prefetch()
+ fl = file(localpath, 'wb')
+ size = 0
+ while True:
+ data = fr.read(32768)
+ if len(data) == 0:
+ break
+ fl.write(data)
+ size += len(data)
+ fl.close()
+ fr.close()
+ s = os.stat(localpath)
+ if s.st_size != size:
+ raise IOError('size mismatch in get! %d != %d' % (s.st_size, size))
+
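+    # Convenience sketch (editor's addition; all paths are placeholders):
+    # put() uses pipelined writes and get() uses prefetching, and both
+    # verify the transferred size afterwards.
+    #
+    #     sftp.put('/local/data/report.csv', '/remote/data/report.csv')
+    #     sftp.get('/remote/data/report.csv', '/local/data/report-copy.csv')
+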
+
+ ### internals...
+
+
+ def _request(self, t, *arg):
+ num = self._async_request(type(None), t, *arg)
+ return self._read_response(num)
+
+ def _async_request(self, fileobj, t, *arg):
+ # this method may be called from other threads (prefetch)
+ self._lock.acquire()
+ try:
+ msg = Message()
+ msg.add_int(self.request_number)
+ for item in arg:
+ if type(item) is int:
+ msg.add_int(item)
+ elif type(item) is long:
+ msg.add_int64(item)
+ elif type(item) is str:
+ msg.add_string(item)
+ elif type(item) is SFTPAttributes:
+ item._pack(msg)
+ else:
+ raise Exception('unknown type for %r type %r' % (item, type(item)))
+ num = self.request_number
+ self._expecting[num] = fileobj
+ self._send_packet(t, str(msg))
+ self.request_number += 1
+ finally:
+ self._lock.release()
+ return num
+
+ def _read_response(self, waitfor=None):
+ while True:
+ t, data = self._read_packet()
+ msg = Message(data)
+ num = msg.get_int()
+ if num not in self._expecting:
+ # might be response for a file that was closed before responses came back
+ self._log(DEBUG, 'Unexpected response #%d' % (num,))
+ if waitfor is None:
+ # just doing a single check
+ return
+ continue
+ fileobj = self._expecting[num]
+ del self._expecting[num]
+ if num == waitfor:
+ # synchronous
+ if t == CMD_STATUS:
+ self._convert_status(msg)
+ return t, msg
+ if fileobj is not type(None):
+ fileobj._async_response(t, msg)
+ if waitfor is None:
+ # just doing a single check
+ return
+
+ def _finish_responses(self, fileobj):
+ while fileobj in self._expecting.values():
+ self._read_response()
+ fileobj._check_exception()
+
+ def _convert_status(self, msg):
+ """
+ Raises EOFError or IOError on error status; otherwise does nothing.
+ """
+ code = msg.get_int()
+ text = msg.get_string()
+ if code == SFTP_OK:
+ return
+ elif code == SFTP_EOF:
+ raise EOFError(text)
+ elif code == SFTP_NO_SUCH_FILE:
+ # clever idea from john a. meinel: map the error codes to errno
+ raise IOError(errno.ENOENT, text)
+ elif code == SFTP_PERMISSION_DENIED:
+ raise IOError(errno.EACCES, text)
+ else:
+ raise IOError(text)
+
+ def _adjust_cwd(self, path):
+ """
+ Return an adjusted path if we're emulating a "current working
+ directory" for the server.
+ """
+ if type(path) is unicode:
+ path = path.encode('utf-8')
+ if self._cwd is None:
+ return path
+ if (len(path) > 0) and (path[0] == '/'):
+ # absolute path
+ return path
+ return self._cwd + '/' + path
+
+
+class SFTP (SFTPClient):
+    "an alias for L{SFTPClient} for backwards compatibility"
+ pass
diff --git a/paramiko/sftp_file.py b/paramiko/sftp_file.py
new file mode 100644
index 0000000..f224f02
--- /dev/null
+++ b/paramiko/sftp_file.py
@@ -0,0 +1,307 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+L{SFTPFile}
+"""
+
+import threading
+from paramiko.common import *
+from paramiko.sftp import *
+from paramiko.file import BufferedFile
+from paramiko.sftp_attr import SFTPAttributes
+
+
+class SFTPFile (BufferedFile):
+ """
+ Proxy object for a file on the remote server, in client mode SFTP.
+ """
+
+ # Some sftp servers will choke if you send read/write requests larger than
+ # this size.
+ MAX_REQUEST_SIZE = 32768
+
+ def __init__(self, sftp, handle, mode='r', bufsize=-1):
+ BufferedFile.__init__(self)
+ self.sftp = sftp
+ self.handle = handle
+ BufferedFile._set_mode(self, mode, bufsize)
+ self.pipelined = False
+ self._prefetching = False
+ self._saved_exception = None
+
+ def __del__(self):
+ self.close(_async=True)
+
+ def close(self, _async=False):
+ # We allow double-close without signaling an error, because real
+ # Python file objects do. However, we must protect against actually
+ # sending multiple CMD_CLOSE packets, because after we close our
+ # handle, the same handle may be re-allocated by the server, and we
+ # may end up mysteriously closing some random other file. (This is
+ # especially important because we unconditionally call close() from
+ # __del__.)
+ if self._closed:
+ return
+ if self.pipelined:
+ self.sftp._finish_responses(self)
+ BufferedFile.close(self)
+ try:
+ if _async:
+ # GC'd file handle could be called from an arbitrary thread -- don't wait for a response
+ self.sftp._async_request(type(None), CMD_CLOSE, self.handle)
+ else:
+ self.sftp._request(CMD_CLOSE, self.handle)
+ except EOFError:
+ # may have outlived the Transport connection
+ pass
+ except IOError:
+ # may have outlived the Transport connection
+ pass
+
+ def _read_prefetch(self, size):
+ # while not closed, and haven't fetched past the current position, and haven't reached EOF...
+ while (self._prefetch_so_far <= self._realpos) and \
+ (self._prefetch_so_far < self._prefetch_size) and not self._closed:
+ self.sftp._read_response()
+ self._check_exception()
+ k = self._prefetch_data.keys()
+ k.sort()
+ while (len(k) > 0) and (k[0] + len(self._prefetch_data[k[0]]) <= self._realpos):
+ # done with that block
+ del self._prefetch_data[k[0]]
+ k.pop(0)
+ if len(k) == 0:
+ self._prefetching = False
+ return ''
+ assert k[0] <= self._realpos
+ buf_offset = self._realpos - k[0]
+ buf_length = len(self._prefetch_data[k[0]]) - buf_offset
+ return self._prefetch_data[k[0]][buf_offset : buf_offset + buf_length]
+
+ def _read(self, size):
+ size = min(size, self.MAX_REQUEST_SIZE)
+ if self._prefetching:
+ return self._read_prefetch(size)
+ t, msg = self.sftp._request(CMD_READ, self.handle, long(self._realpos), int(size))
+ if t != CMD_DATA:
+ raise SFTPError('Expected data')
+ return msg.get_string()
+
+ def _write(self, data):
+ # may write less than requested if it would exceed max packet size
+ chunk = min(len(data), self.MAX_REQUEST_SIZE)
+ req = self.sftp._async_request(type(None), CMD_WRITE, self.handle, long(self._realpos),
+ str(data[:chunk]))
+ if not self.pipelined or self.sftp.sock.recv_ready():
+ t, msg = self.sftp._read_response(req)
+ if t != CMD_STATUS:
+ raise SFTPError('Expected status')
+ # convert_status already called
+ return chunk
+
+ def settimeout(self, timeout):
+ """
+ Set a timeout on read/write operations on the underlying socket or
+ ssh L{Channel}.
+
+ @see: L{Channel.settimeout}
+ @param timeout: seconds to wait for a pending read/write operation
+ before raising C{socket.timeout}, or C{None} for no timeout
+ @type timeout: float
+ """
+ self.sftp.sock.settimeout(timeout)
+
+ def gettimeout(self):
+ """
+ Returns the timeout in seconds (as a float) associated with the socket
+ or ssh L{Channel} used for this file.
+
+ @see: L{Channel.gettimeout}
+ @rtype: float
+ """
+ return self.sftp.sock.gettimeout()
+
+ def setblocking(self, blocking):
+ """
+        Set blocking or non-blocking mode on the underlying socket or ssh
+ L{Channel}.
+
+ @see: L{Channel.setblocking}
+ @param blocking: 0 to set non-blocking mode; non-0 to set blocking
+ mode.
+ @type blocking: int
+ """
+ self.sftp.sock.setblocking(blocking)
+
+ def seek(self, offset, whence=0):
+ self.flush()
+ if whence == self.SEEK_SET:
+ self._realpos = self._pos = offset
+ elif whence == self.SEEK_CUR:
+ self._pos += offset
+ self._realpos = self._pos
+ else:
+ self._realpos = self._pos = self._get_size() + offset
+ self._rbuffer = ''
+
+ def stat(self):
+ """
+ Retrieve information about this file from the remote system. This is
+ exactly like L{SFTP.stat}, except that it operates on an already-open
+ file.
+
+ @return: an object containing attributes about this file.
+ @rtype: SFTPAttributes
+ """
+ t, msg = self.sftp._request(CMD_FSTAT, self.handle)
+ if t != CMD_ATTRS:
+ raise SFTPError('Expected attributes')
+ return SFTPAttributes._from_msg(msg)
+
+ def check(self, hash_algorithm, offset=0, length=0, block_size=0):
+ """
+ Ask the server for a hash of a section of this file. This can be used
+ to verify a successful upload or download, or for various rsync-like
+ operations.
+
+ The file is hashed from C{offset}, for C{length} bytes. If C{length}
+ is 0, the remainder of the file is hashed. Thus, if both C{offset}
+ and C{length} are zero, the entire file is hashed.
+
+ Normally, C{block_size} will be 0 (the default), and this method will
+ return a byte string representing the requested hash (for example, a
+ string of length 16 for MD5, or 20 for SHA-1). If a non-zero
+ C{block_size} is given, each chunk of the file (from C{offset} to
+ C{offset + length}) of C{block_size} bytes is computed as a separate
+ hash. The hash results are all concatenated and returned as a single
+ string.
+
+ For example, C{check('sha1', 0, 1024, 512)} will return a string of
+ length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes
+ of the file, and the last 20 bytes will be the SHA-1 of the next 512
+ bytes.
+
+ @param hash_algorithm: the name of the hash algorithm to use (normally
+ C{"sha1"} or C{"md5"})
+ @type hash_algorithm: str
+ @param offset: offset into the file to begin hashing (0 means to start
+ from the beginning)
+ @type offset: int or long
+ @param length: number of bytes to hash (0 means continue to the end of
+ the file)
+ @type length: int or long
+ @param block_size: number of bytes to hash per result (must not be less
+ than 256; 0 means to compute only one hash of the entire segment)
+ @type block_size: int
+ @return: string of bytes representing the hash of each block,
+ concatenated together
+ @rtype: str
+
+ @note: Many (most?) servers don't support this extension yet.
+
+ @raise IOError: if the server doesn't support the "check-file"
+ extension, or possibly doesn't support the hash algorithm
+ requested
+
+ @since: 1.4
+ """
+ t, msg = self.sftp._request(CMD_EXTENDED, 'check-file', self.handle,
+ hash_algorithm, long(offset), long(length), block_size)
+ ext = msg.get_string()
+ alg = msg.get_string()
+ data = msg.get_remainder()
+ return data
+
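+    # Hedged sketch (editor's addition): most servers lack the "check-file"
+    # extension, so expect IOError; the remote path is hypothetical.
+    #
+    #     f = sftp.open('/tmp/big.iso', 'r')
+    #     whole = f.check('md5')                  # one digest for the whole file
+    #     blocks = f.check('sha1', 0, 0, 65536)   # one 20-byte SHA-1 per 64KB block
+    #     f.close()
+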
+ def set_pipelined(self, pipelined=True):
+ """
+ Turn on/off the pipelining of write operations to this file. When
+ pipelining is on, paramiko won't wait for the server response after
+ each write operation. Instead, they're collected as they come in.
+ At the first non-write operation (including L{close}), all remaining
+ server responses are collected. This means that if there was an error
+ with one of your later writes, an exception might be thrown from
+ within L{close} instead of L{write}.
+
+ By default, files are I{not} pipelined.
+
+ @param pipelined: C{True} if pipelining should be turned on for this
+ file; C{False} otherwise
+ @type pipelined: bool
+
+ @since: 1.5
+ """
+ self.pipelined = pipelined
+
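+    # Sketch (editor's illustration): with pipelining on, write errors may
+    # only surface at the next non-write operation, typically close().
+    # `chunks` is assumed to be an iterable of strings.
+    #
+    #     f = sftp.open('/tmp/upload.bin', 'w')
+    #     f.set_pipelined(True)
+    #     for chunk in chunks:
+    #         f.write(chunk)
+    #     f.close()        # deferred write errors are raised here
+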
+ def prefetch(self):
+ """
+ Pre-fetch the remaining contents of this file in anticipation of
+ future L{read} calls. If reading the entire file, pre-fetching can
+ dramatically improve the download speed by avoiding roundtrip latency.
+ The file's contents are incrementally buffered in a background thread.
+
+ @since: 1.5.1
+ """
+ size = self.stat().st_size
+ # queue up async reads for the rest of the file
+ self._prefetching = True
+ self._prefetch_so_far = self._realpos
+ self._prefetch_size = size
+ self._prefetch_data = {}
+ t = threading.Thread(target=self._prefetch)
+ t.setDaemon(True)
+ t.start()
+
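+    # Sketch (editor's illustration): prefetching issues all remaining read
+    # requests up front, so a sequential download is no longer bound by
+    # per-request round-trip latency.
+    #
+    #     f = sftp.open('/tmp/big.iso', 'r')
+    #     f.prefetch()
+    #     data = f.read()      # served from the prefetch buffer as it arrives
+    #     f.close()
+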
+ def _prefetch(self):
+ n = self._realpos
+ size = self._prefetch_size
+ while n < size:
+ chunk = min(self.MAX_REQUEST_SIZE, size - n)
+ self.sftp._async_request(self, CMD_READ, self.handle, long(n), int(chunk))
+ n += chunk
+
+
+ ### internals...
+
+
+ def _get_size(self):
+ try:
+ return self.stat().st_size
+ except:
+ return 0
+
+ def _async_response(self, t, msg):
+ if t == CMD_STATUS:
+ # save exception and re-raise it on next file operation
+ try:
+ self.sftp._convert_status(msg)
+ except Exception, x:
+ self._saved_exception = x
+ return
+ if t != CMD_DATA:
+ raise SFTPError('Expected data')
+ data = msg.get_string()
+ self._prefetch_data[self._prefetch_so_far] = data
+ self._prefetch_so_far += len(data)
+
+ def _check_exception(self):
+ "if there's a saved exception, raise & clear it"
+ if self._saved_exception is not None:
+ x = self._saved_exception
+ self._saved_exception = None
+ raise x
diff --git a/paramiko/sftp_handle.py b/paramiko/sftp_handle.py
new file mode 100644
index 0000000..e1d93e9
--- /dev/null
+++ b/paramiko/sftp_handle.py
@@ -0,0 +1,188 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Abstraction of an SFTP file handle (for server mode).
+"""
+
+import os
+
+from paramiko.common import *
+from paramiko.sftp import *
+
+
+class SFTPHandle (object):
+ """
+ Abstract object representing a handle to an open file (or folder) in an
+ SFTP server implementation. Each handle has a string representation used
+ by the client to refer to the underlying file.
+
+ Server implementations can (and should) subclass SFTPHandle to implement
+ features of a file handle, like L{stat} or L{chattr}.
+ """
+ def __init__(self):
+ self.__name = None
+ # only for handles to folders:
+ self.__files = { }
+ self.__tell = None
+
+ def close(self):
+ """
+ When a client closes a file, this method is called on the handle.
+ Normally you would use this method to close the underlying OS level
+ file object(s).
+
+ The default implementation checks for attributes on C{self} named
+ C{readfile} and/or C{writefile}, and if either or both are present,
+ their C{close()} methods are called. This means that if you are
+ using the default implementations of L{read} and L{write}, this
+ method's default implementation should be fine also.
+ """
+ readfile = getattr(self, 'readfile', None)
+ if readfile is not None:
+ readfile.close()
+ writefile = getattr(self, 'writefile', None)
+ if writefile is not None:
+ writefile.close()
+
+ def read(self, offset, length):
+ """
+ Read up to C{length} bytes from this file, starting at position
+ C{offset}. The offset may be a python long, since SFTP allows it
+ to be 64 bits.
+
+ If the end of the file has been reached, this method may return an
+ empty string to signify EOF, or it may also return L{SFTP_EOF}.
+
+ The default implementation checks for an attribute on C{self} named
+ C{readfile}, and if present, performs the read operation on the python
+ file-like object found there. (This is meant as a time saver for the
+ common case where you are wrapping a python file object.)
+
+ @param offset: position in the file to start reading from.
+ @type offset: int or long
+ @param length: number of bytes to attempt to read.
+ @type length: int
+ @return: data read from the file, or an SFTP error code.
+ @rtype: str
+ """
+ if not hasattr(self, 'readfile') or (self.readfile is None):
+ return SFTP_OP_UNSUPPORTED
+ try:
+ if self.__tell is None:
+ self.__tell = self.readfile.tell()
+ if offset != self.__tell:
+ self.readfile.seek(offset)
+ self.__tell = offset
+ data = self.readfile.read(length)
+ except IOError, e:
+ self.__tell = None
+ return SFTPServer.convert_errno(e.errno)
+ self.__tell += len(data)
+ return data
+
+ def write(self, offset, data):
+ """
+ Write C{data} into this file at position C{offset}. Extending the
+ file past its original end is expected. Unlike python's normal
+ C{write()} methods, this method cannot do a partial write: it must
+ write all of C{data} or else return an error.
+
+ The default implementation checks for an attribute on C{self} named
+ C{writefile}, and if present, performs the write operation on the
+ python file-like object found there. The attribute is named
+ differently from C{readfile} to make it easy to implement read-only
+ (or write-only) files, but if both attributes are present, they should
+ refer to the same file.
+
+        @param offset: position in the file to start writing at.
+ @type offset: int or long
+ @param data: data to write into the file.
+ @type data: str
+ @return: an SFTP error code like L{SFTP_OK}.
+ """
+ if not hasattr(self, 'writefile') or (self.writefile is None):
+ return SFTP_OP_UNSUPPORTED
+ try:
+ if self.__tell is None:
+ self.__tell = self.writefile.tell()
+ if offset != self.__tell:
+ self.writefile.seek(offset)
+ self.__tell = offset
+ self.writefile.write(data)
+ self.writefile.flush()
+ except IOError, e:
+ self.__tell = None
+ return SFTPServer.convert_errno(e.errno)
+ self.__tell += len(data)
+ return SFTP_OK
+
+ def stat(self):
+ """
+ Return an L{SFTPAttributes} object referring to this open file, or an
+ error code. This is equivalent to L{SFTPServerInterface.stat}, except
+ it's called on an open file instead of a path.
+
+ @return: an attributes object for the given file, or an SFTP error
+ code (like L{SFTP_PERMISSION_DENIED}).
+ @rtype: L{SFTPAttributes} I{or error code}
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def chattr(self, attr):
+ """
+ Change the attributes of this file. The C{attr} object will contain
+ only those fields provided by the client in its request, so you should
+ check for the presence of fields before using them.
+
+ @param attr: the attributes to change on this file.
+ @type attr: L{SFTPAttributes}
+ @return: an error code like L{SFTP_OK}.
+ @rtype: int
+ """
+ return SFTP_OP_UNSUPPORTED
+
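+    # Minimal server-side sketch (editor's addition): wrapping an ordinary
+    # python file object in C{readfile}/C{writefile} lets the default read(),
+    # write() and close() implementations above do all the work.  The class
+    # name is hypothetical.
+    #
+    #     class SimpleHandle (SFTPHandle):
+    #         def __init__(self, fileobj):
+    #             SFTPHandle.__init__(self)
+    #             self.readfile = fileobj
+    #             self.writefile = fileobj
+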
+
+ ### internals...
+
+
+ def _set_files(self, files):
+ """
+ Used by the SFTP server code to cache a directory listing. (In
+ the SFTP protocol, listing a directory is a multi-stage process
+ requiring a temporary handle.)
+ """
+ self.__files = files
+
+ def _get_next_files(self):
+ """
+        Used by the SFTP server code to retrieve a cached directory
+ listing.
+ """
+ fnlist = self.__files[:16]
+ self.__files = self.__files[16:]
+ return fnlist
+
+ def _get_name(self):
+ return self.__name
+
+ def _set_name(self, name):
+ self.__name = name
+
+
+from paramiko.sftp_server import SFTPServer
diff --git a/paramiko/sftp_server.py b/paramiko/sftp_server.py
new file mode 100644
index 0000000..5905843
--- /dev/null
+++ b/paramiko/sftp_server.py
@@ -0,0 +1,420 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Server-mode SFTP support.
+"""
+
+import os
+import errno
+
+from Crypto.Hash import MD5, SHA
+from paramiko.common import *
+from paramiko.server import SubsystemHandler
+from paramiko.sftp import *
+from paramiko.sftp_si import *
+from paramiko.sftp_attr import *
+
+
+# known hash algorithms for the "check-file" extension
+_hash_class = {
+ 'sha1': SHA,
+ 'md5': MD5,
+}
+
+
+class SFTPServer (BaseSFTP, SubsystemHandler):
+ """
+ Server-side SFTP subsystem support. Since this is a L{SubsystemHandler},
+ it can be (and is meant to be) set as the handler for C{"sftp"} requests.
+ Use L{Transport.set_subsystem_handler} to activate this class.
+ """
+
+ def __init__(self, channel, name, server, sftp_si=SFTPServerInterface, *largs, **kwargs):
+ """
+ The constructor for SFTPServer is meant to be called from within the
+ L{Transport} as a subsystem handler. C{server} and any additional
+ parameters or keyword parameters are passed from the original call to
+ L{Transport.set_subsystem_handler}.
+
+ @param channel: channel passed from the L{Transport}.
+ @type channel: L{Channel}
+ @param name: name of the requested subsystem.
+ @type name: str
+ @param server: the server object associated with this channel and
+ subsystem
+ @type server: L{ServerInterface}
+ @param sftp_si: a subclass of L{SFTPServerInterface} to use for handling
+ individual requests.
+ @type sftp_si: class
+ """
+ BaseSFTP.__init__(self)
+ SubsystemHandler.__init__(self, channel, name, server)
+ transport = channel.get_transport()
+ self.logger = util.get_logger(transport.get_log_channel() + '.' +
+ channel.get_name() + '.sftp')
+ self.ultra_debug = transport.get_hexdump()
+ self.next_handle = 1
+ # map of handle-string to SFTPHandle for files & folders:
+ self.file_table = { }
+ self.folder_table = { }
+ self.server = sftp_si(server, *largs, **kwargs)
+
+ def start_subsystem(self, name, transport, channel):
+ self.sock = channel
+ self._log(DEBUG, 'Started sftp server on channel %s' % repr(channel))
+ self._send_server_version()
+ self.server.session_started()
+ while True:
+ try:
+ t, data = self._read_packet()
+ except EOFError:
+ self._log(DEBUG, 'EOF -- end of session')
+ return
+ except Exception, e:
+ self._log(DEBUG, 'Exception on channel: ' + str(e))
+ self._log(DEBUG, util.tb_strings())
+ return
+ msg = Message(data)
+ request_number = msg.get_int()
+ self._process(t, request_number, msg)
+
+ def finish_subsystem(self):
+ self.server.session_ended()
+ # close any file handles that were left open (so we can return them to the OS quickly)
+ for f in self.file_table.itervalues():
+ f.close()
+ for f in self.folder_table.itervalues():
+ f.close()
+ self.file_table = {}
+ self.folder_table = {}
+
+ def convert_errno(e):
+ """
+ Convert an errno value (as from an C{OSError} or C{IOError}) into a
+ standard SFTP result code. This is a convenience function for trapping
+ exceptions in server code and returning an appropriate result.
+
+ @param e: an errno code, as from C{OSError.errno}.
+ @type e: int
+ @return: an SFTP error code like L{SFTP_NO_SUCH_FILE}.
+ @rtype: int
+ """
+ if e == errno.EACCES:
+ # permission denied
+ return SFTP_PERMISSION_DENIED
+ elif e == errno.ENOENT:
+ # no such file
+ return SFTP_NO_SUCH_FILE
+ else:
+ return SFTP_FAILURE
+ convert_errno = staticmethod(convert_errno)
+
+ def set_file_attr(filename, attr):
+ """
+ Change a file's attributes on the local filesystem. The contents of
+ C{attr} are used to change the permissions, owner, group ownership,
+ and/or modification & access time of the file, depending on which
+ attributes are present in C{attr}.
+
+ This is meant to be a handy helper function for translating SFTP file
+ requests into local file operations.
+
+ @param filename: name of the file to alter (should usually be an
+ absolute path).
+ @type filename: str
+ @param attr: attributes to change.
+ @type attr: L{SFTPAttributes}
+ """
+ if attr._flags & attr.FLAG_PERMISSIONS:
+ os.chmod(filename, attr.st_mode)
+ if attr._flags & attr.FLAG_UIDGID:
+ os.chown(filename, attr.st_uid, attr.st_gid)
+ if attr._flags & attr.FLAG_AMTIME:
+ os.utime(filename, (attr.st_atime, attr.st_mtime))
+ set_file_attr = staticmethod(set_file_attr)
+
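+    # Hedged sketch (editor's addition) of how a server implementation might
+    # combine these two helpers inside an SFTPServerInterface method; the
+    # local root path is a placeholder.
+    #
+    #     def chattr(self, path, attr):
+    #         try:
+    #             SFTPServer.set_file_attr('/srv/sftp-root' + path, attr)
+    #         except OSError, e:
+    #             return SFTPServer.convert_errno(e.errno)
+    #         return SFTP_OK
+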
+
+ ### internals...
+
+
+ def _response(self, request_number, t, *arg):
+ msg = Message()
+ msg.add_int(request_number)
+ for item in arg:
+ if type(item) is int:
+ msg.add_int(item)
+ elif type(item) is long:
+ msg.add_int64(item)
+ elif type(item) is str:
+ msg.add_string(item)
+ elif type(item) is SFTPAttributes:
+ item._pack(msg)
+ else:
+ raise Exception('unknown type for ' + repr(item) + ' type ' + repr(type(item)))
+ self._send_packet(t, str(msg))
+
+ def _send_handle_response(self, request_number, handle, folder=False):
+ if not issubclass(type(handle), SFTPHandle):
+ # must be error code
+ self._send_status(request_number, handle)
+ return
+ handle._set_name('hx%d' % self.next_handle)
+ self.next_handle += 1
+ if folder:
+ self.folder_table[handle._get_name()] = handle
+ else:
+ self.file_table[handle._get_name()] = handle
+ self._response(request_number, CMD_HANDLE, handle._get_name())
+
+ def _send_status(self, request_number, code, desc=None):
+ if desc is None:
+ desc = SFTP_DESC[code]
+ self._response(request_number, CMD_STATUS, code, desc)
+
+ def _open_folder(self, request_number, path):
+ resp = self.server.list_folder(path)
+ if issubclass(type(resp), list):
+ # got an actual list of filenames in the folder
+ folder = SFTPHandle()
+ folder._set_files(resp)
+ self._send_handle_response(request_number, folder, True)
+ return
+ # must be an error code
+ self._send_status(request_number, resp)
+
+ def _read_folder(self, request_number, folder):
+ flist = folder._get_next_files()
+ if len(flist) == 0:
+ self._send_status(request_number, SFTP_EOF)
+ return
+ msg = Message()
+ msg.add_int(request_number)
+ msg.add_int(len(flist))
+ for attr in flist:
+ msg.add_string(attr.filename)
+ msg.add_string(str(attr))
+ attr._pack(msg)
+ self._send_packet(CMD_NAME, str(msg))
+
+ def _check_file(self, request_number, msg):
+        # this extension actually comes from the v6 protocol, but since it's
+        # an extension, I feel like we can reasonably support it backported.
+ # it's very useful for verifying uploaded files or checking for
+ # rsync-like differences between local and remote files.
+ handle = msg.get_string()
+ alg_list = msg.get_list()
+ start = msg.get_int64()
+ length = msg.get_int64()
+ block_size = msg.get_int()
+ if not self.file_table.has_key(handle):
+ self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ return
+ f = self.file_table[handle]
+ for x in alg_list:
+ if x in _hash_class:
+ algname = x
+ alg = _hash_class[x]
+ break
+ else:
+ self._send_status(request_number, SFTP_FAILURE, 'No supported hash types found')
+ return
+ if length == 0:
+ st = f.stat()
+ if not issubclass(type(st), SFTPAttributes):
+ self._send_status(request_number, st, 'Unable to stat file')
+ return
+ length = st.st_size - start
+ if block_size == 0:
+ block_size = length
+ if block_size < 256:
+ self._send_status(request_number, SFTP_FAILURE, 'Block size too small')
+ return
+
+ sum = ''
+ offset = start
+ while offset < start + length:
+ blocklen = min(block_size, start + length - offset)
+ # don't try to read more than about 64KB at a time
+ chunklen = min(blocklen, 65536)
+ count = 0
+ hash = alg.new()
+ while count < blocklen:
+ data = f.read(offset, chunklen)
+ if not type(data) is str:
+ self._send_status(request_number, data, 'Unable to hash file')
+ return
+ hash.update(data)
+ count += len(data)
+ offset += count
+ sum += hash.digest()
+
+ msg = Message()
+ msg.add_int(request_number)
+ msg.add_string('check-file')
+ msg.add_string(algname)
+ msg.add_bytes(sum)
+ self._send_packet(CMD_EXTENDED_REPLY, str(msg))
+
+ def _convert_pflags(self, pflags):
+ "convert SFTP-style open() flags to python's os.open() flags"
+ if (pflags & SFTP_FLAG_READ) and (pflags & SFTP_FLAG_WRITE):
+ flags = os.O_RDWR
+ elif pflags & SFTP_FLAG_WRITE:
+ flags = os.O_WRONLY
+ else:
+ flags = os.O_RDONLY
+ if pflags & SFTP_FLAG_APPEND:
+ flags |= os.O_APPEND
+ if pflags & SFTP_FLAG_CREATE:
+ flags |= os.O_CREAT
+ if pflags & SFTP_FLAG_TRUNC:
+ flags |= os.O_TRUNC
+ if pflags & SFTP_FLAG_EXCL:
+ flags |= os.O_EXCL
+ return flags
+
+ def _process(self, t, request_number, msg):
+ self._log(DEBUG, 'Request: %s' % CMD_NAMES[t])
+ if t == CMD_OPEN:
+ path = msg.get_string()
+ flags = self._convert_pflags(msg.get_int())
+ attr = SFTPAttributes._from_msg(msg)
+ self._send_handle_response(request_number, self.server.open(path, flags, attr))
+ elif t == CMD_CLOSE:
+ handle = msg.get_string()
+ if self.folder_table.has_key(handle):
+ del self.folder_table[handle]
+ self._send_status(request_number, SFTP_OK)
+ return
+ if self.file_table.has_key(handle):
+ self.file_table[handle].close()
+ del self.file_table[handle]
+ self._send_status(request_number, SFTP_OK)
+ return
+ self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ elif t == CMD_READ:
+ handle = msg.get_string()
+ offset = msg.get_int64()
+ length = msg.get_int()
+ if not self.file_table.has_key(handle):
+ self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ return
+ data = self.file_table[handle].read(offset, length)
+ if type(data) is str:
+ if len(data) == 0:
+ self._send_status(request_number, SFTP_EOF)
+ else:
+ self._response(request_number, CMD_DATA, data)
+ else:
+ self._send_status(request_number, data)
+ elif t == CMD_WRITE:
+ handle = msg.get_string()
+ offset = msg.get_int64()
+ data = msg.get_string()
+ if not self.file_table.has_key(handle):
+ self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ return
+ self._send_status(request_number, self.file_table[handle].write(offset, data))
+ elif t == CMD_REMOVE:
+ path = msg.get_string()
+ self._send_status(request_number, self.server.remove(path))
+ elif t == CMD_RENAME:
+ oldpath = msg.get_string()
+ newpath = msg.get_string()
+ self._send_status(request_number, self.server.rename(oldpath, newpath))
+ elif t == CMD_MKDIR:
+ path = msg.get_string()
+ attr = SFTPAttributes._from_msg(msg)
+ self._send_status(request_number, self.server.mkdir(path, attr))
+ elif t == CMD_RMDIR:
+ path = msg.get_string()
+ self._send_status(request_number, self.server.rmdir(path))
+ elif t == CMD_OPENDIR:
+ path = msg.get_string()
+ self._open_folder(request_number, path)
+ return
+ elif t == CMD_READDIR:
+ handle = msg.get_string()
+ if not self.folder_table.has_key(handle):
+ self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ return
+ folder = self.folder_table[handle]
+ self._read_folder(request_number, folder)
+ elif t == CMD_STAT:
+ path = msg.get_string()
+ resp = self.server.stat(path)
+ if issubclass(type(resp), SFTPAttributes):
+ self._response(request_number, CMD_ATTRS, resp)
+ else:
+ self._send_status(request_number, resp)
+ elif t == CMD_LSTAT:
+ path = msg.get_string()
+ resp = self.server.lstat(path)
+ if issubclass(type(resp), SFTPAttributes):
+ self._response(request_number, CMD_ATTRS, resp)
+ else:
+ self._send_status(request_number, resp)
+ elif t == CMD_FSTAT:
+ handle = msg.get_string()
+ if not self.file_table.has_key(handle):
+ self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ return
+ resp = self.file_table[handle].stat()
+ if issubclass(type(resp), SFTPAttributes):
+ self._response(request_number, CMD_ATTRS, resp)
+ else:
+ self._send_status(request_number, resp)
+ elif t == CMD_SETSTAT:
+ path = msg.get_string()
+ attr = SFTPAttributes._from_msg(msg)
+ self._send_status(request_number, self.server.chattr(path, attr))
+ elif t == CMD_FSETSTAT:
+ handle = msg.get_string()
+ attr = SFTPAttributes._from_msg(msg)
+ if not self.file_table.has_key(handle):
+ self._response(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ return
+ self._send_status(request_number, self.file_table[handle].chattr(attr))
+ elif t == CMD_READLINK:
+ path = msg.get_string()
+ resp = self.server.readlink(path)
+ if type(resp) is str:
+ self._response(request_number, CMD_NAME, 1, resp, '', SFTPAttributes())
+ else:
+ self._send_status(request_number, resp)
+ elif t == CMD_SYMLINK:
+ # the sftp 2 draft is incorrect here! path always follows target_path
+ target_path = msg.get_string()
+ path = msg.get_string()
+ self._send_status(request_number, self.server.symlink(target_path, path))
+ elif t == CMD_REALPATH:
+ path = msg.get_string()
+ rpath = self.server.canonicalize(path)
+ self._response(request_number, CMD_NAME, 1, rpath, '', SFTPAttributes())
+ elif t == CMD_EXTENDED:
+ tag = msg.get_string()
+ if tag == 'check-file':
+ self._check_file(request_number, msg)
+ else:
+                self._send_status(request_number, SFTP_OP_UNSUPPORTED)
+ else:
+ self._send_status(request_number, SFTP_OP_UNSUPPORTED)
+
+
+from paramiko.sftp_handle import SFTPHandle
diff --git a/paramiko/sftp_si.py b/paramiko/sftp_si.py
new file mode 100644
index 0000000..16005d4
--- /dev/null
+++ b/paramiko/sftp_si.py
@@ -0,0 +1,303 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+L{SFTPServerInterface} is an interface to override for SFTP server support.
+"""
+
+import os
+
+from paramiko.common import *
+from paramiko.sftp import *
+
+
+class SFTPServerInterface (object):
+ """
+ This class defines an interface for controlling the behavior of paramiko
+ when using the L{SFTPServer} subsystem to provide an SFTP server.
+
+ Methods on this class are called from the SFTP session's thread, so you can
+ block as long as necessary without affecting other sessions (even other
+ SFTP sessions). However, raising an exception will usually cause the SFTP
+ session to abruptly end, so you will usually want to catch exceptions and
+ return an appropriate error code.
+ """
+
+ def __init__ (self, server, *largs, **kwargs):
+ """
+ Create a new SFTPServerInterface object. This method does nothing by
+ default and is meant to be overridden by subclasses.
+
+ @param server: the server object associated with this channel and
+ SFTP subsystem
+ @type server: L{ServerInterface}
+ """
+ super(SFTPServerInterface, self).__init__(*largs, **kwargs)
+
+ def session_started(self):
+ """
+ The SFTP server session has just started. This method is meant to be
+ overridden to perform any necessary setup before handling callbacks
+ from SFTP operations.
+ """
+ pass
+
+ def session_ended(self):
+ """
+ The SFTP server session has just ended, either cleanly or via an
+ exception. This method is meant to be overridden to perform any
+ necessary cleanup before this C{SFTPServerInterface} object is
+ destroyed.
+ """
+ pass
+
+ def open(self, path, flags, attr):
+ """
+ Open a file on the server and create a handle for future operations
+ on that file. On success, a new object subclassed from L{SFTPHandle}
+ should be returned. This handle will be used for future operations
+ on the file (read, write, etc). On failure, an error code such as
+ L{SFTP_PERMISSION_DENIED} should be returned.
+
+ C{flags} contains the requested mode for opening (read-only,
+ write-append, etc) as a bitset of flags from the C{os} module:
+ - C{os.O_RDONLY}
+ - C{os.O_WRONLY}
+ - C{os.O_RDWR}
+ - C{os.O_APPEND}
+ - C{os.O_CREAT}
+ - C{os.O_TRUNC}
+ - C{os.O_EXCL}
+ (One of C{os.O_RDONLY}, C{os.O_WRONLY}, or C{os.O_RDWR} will always
+ be set.)
+
+ The C{attr} object contains requested attributes of the file if it
+ has to be created. Some or all attribute fields may be missing if
+ the client didn't specify them.
+
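+ A rough sketch of an override is shown below (the C{'/srv/files'} root is
+ purely illustrative, and it assumes the stock L{SFTPHandle} honors the
+ C{readfile}/C{writefile} convention; C{os} and C{SFTPHandle} would need
+ to be imported in your own module)::
+
+     def open(self, path, flags, attr):
+         # simplified: only read-only and write-only opens are handled
+         local = '/srv/files' + self.canonicalize(path)
+         try:
+             fd = os.open(local, flags, 0644)
+         except OSError:
+             return SFTP_PERMISSION_DENIED
+         handle = SFTPHandle()
+         if flags & os.O_WRONLY:
+             handle.writefile = os.fdopen(fd, 'wb')
+         else:
+             handle.readfile = os.fdopen(fd, 'rb')
+         return handle
+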
+ @note: The SFTP protocol defines all files to be in "binary" mode.
+ There is no equivalent to python's "text" mode.
+
+ @param path: the requested path (relative or absolute) of the file
+ to be opened.
+ @type path: str
+ @param flags: flags or'd together from the C{os} module indicating the
+ requested mode for opening the file.
+ @type flags: int
+ @param attr: requested attributes of the file if it is newly created.
+ @type attr: L{SFTPAttributes}
+ @return: a new L{SFTPHandle} I{or error code}.
+ @rtype L{SFTPHandle}
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def list_folder(self, path):
+ """
+ Return a list of files within a given folder. The C{path} will use
+ posix notation (C{"/"} separates folder names) and may be an absolute
+ or relative path.
+
+ The list of files is expected to be a list of L{SFTPAttributes}
+ objects, which are similar in structure to the objects returned by
+ C{os.stat}. In addition, each object should have its C{filename}
+ field filled in, since this is important to a directory listing and
+ not normally present in C{os.stat} results. The method
+ L{SFTPAttributes.from_stat} will usually do what you want.
+
+ In case of an error, you should return one of the C{SFTP_*} error
+ codes, such as L{SFTP_PERMISSION_DENIED}.
+
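+ A minimal sketch (again with an illustrative C{'/srv/files'} root, and
+ assuming C{os} and C{SFTPAttributes} are imported in your own module)::
+
+     def list_folder(self, path):
+         local = '/srv/files' + self.canonicalize(path)
+         try:
+             out = []
+             for name in os.listdir(local):
+                 attr = SFTPAttributes.from_stat(os.stat(os.path.join(local, name)))
+                 attr.filename = name
+                 out.append(attr)
+             return out
+         except OSError:
+             return SFTP_NO_SUCH_FILE
+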
+ @param path: the requested path (relative or absolute) to be listed.
+ @type path: str
+ @return: a list of the files in the given folder, using
+ L{SFTPAttributes} objects.
+ @rtype: list of L{SFTPAttributes} I{or error code}
+
+ @note: You should normalize the given C{path} first (see the
+ C{os.path} module) and check appropriate permissions before returning
+ the list of files. Be careful of malicious clients attempting to use
+ relative paths to escape restricted folders, if you're doing a direct
+ translation from the SFTP server path to your local filesystem.
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def stat(self, path):
+ """
+ Return an L{SFTPAttributes} object for a path on the server, or an
+ error code. If your server supports symbolic links (also known as
+ "aliases"), you should follow them. (L{lstat} is the corresponding
+ call that doesn't follow symlinks/aliases.)
+
+ @param path: the requested path (relative or absolute) to fetch
+ file statistics for.
+ @type path: str
+ @return: an attributes object for the given file, or an SFTP error
+ code (like L{SFTP_PERMISSION_DENIED}).
+ @rtype: L{SFTPAttributes} I{or error code}
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def lstat(self, path):
+ """
+ Return an L{SFTPAttributes} object for a path on the server, or an
+ error code. If your server supports symbolic links (also known as
+ "aliases"), you should I{not} follow them -- instead, you should
+ return data on the symlink or alias itself. (L{stat} is the
+ corresponding call that follows symlinks/aliases.)
+
+ @param path: the requested path (relative or absolute) to fetch
+ file statistics for.
+ @type path: str
+ @return: an attributes object for the given file, or an SFTP error
+ code (like L{SFTP_PERMISSION_DENIED}).
+ @rtype: L{SFTPAttributes} I{or error code}
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def remove(self, path):
+ """
+ Delete a file, if possible.
+
+ @param path: the requested path (relative or absolute) of the file
+ to delete.
+ @type path: str
+ @return: an SFTP error code like L{SFTP_OK}.
+ @rtype: int
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def rename(self, oldpath, newpath):
+ """
+ Rename (or move) a file. The SFTP specification implies that this
+ method can be used to move an existing file into a different folder,
+ and since there's no other (easy) way to move files via SFTP, it's
+ probably a good idea to implement "move" in this method too, even for
+ files that cross disk partition boundaries, if at all possible.
+
+ @note: You should return an error if a file with the same name as
+ C{newpath} already exists. (The rename operation should be
+ non-destructive.)
+
+ @param oldpath: the requested path (relative or absolute) of the
+ existing file.
+ @type oldpath: str
+ @param newpath: the requested new path of the file.
+ @type newpath: str
+ @return: an SFTP error code like L{SFTP_OK}.
+ @rtype: int
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def mkdir(self, path, attr):
+ """
+ Create a new directory with the given attributes. The C{attr}
+ object may be considered a "hint" and ignored.
+
+ The C{attr} object will contain only those fields provided by the
+ client in its request, so you should use C{hasattr} to check for
+ the presence of fields before using them. In some cases, the C{attr}
+ object may be completely empty.
+
+ @param path: requested path (relative or absolute) of the new
+ folder.
+ @type path: str
+ @param attr: requested attributes of the new folder.
+ @type attr: L{SFTPAttributes}
+ @return: an SFTP error code like L{SFTP_OK}.
+ @rtype: int
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def rmdir(self, path):
+ """
+ Remove a directory if it exists. The C{path} should refer to an
+ existing, empty folder -- otherwise this method should return an
+ error.
+
+ @param path: requested path (relative or absolute) of the folder
+ to remove.
+ @type path: str
+ @return: an SFTP error code like L{SFTP_OK}.
+ @rtype: int
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def chattr(self, path, attr):
+ """
+ Change the attributes of a file. The C{attr} object will contain
+ only those fields provided by the client in its request, so you
+ should check for the presence of fields before using them.
+
+ @param path: requested path (relative or absolute) of the file to
+ change.
+ @type path: str
+ @param attr: requested attributes to change on the file.
+ @type attr: L{SFTPAttributes}
+ @return: an error code like L{SFTP_OK}.
+ @rtype: int
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def canonicalize(self, path):
+ """
+ Return the canonical form of a path on the server. For example,
+ if the server's home folder is C{/home/foo}, the path
+ C{"../betty"} would be canonicalized to C{"/home/betty"}. Note
+ the obvious security issues: if you're serving files only from a
+ specific folder, you probably don't want this method to reveal path
+ names outside that folder.
+
+ You may find the python methods in C{os.path} useful, especially
+ C{os.path.normpath} and C{os.path.realpath}.
+
+ The default implementation returns C{os.path.normpath('/' + path)}.
+ """
+ if os.path.isabs(path):
+ return os.path.normpath(path)
+ else:
+ return os.path.normpath('/' + path)
+
+ def readlink(self, path):
+ """
+ Return the target of a symbolic link (or shortcut) on the server.
+ If the specified path doesn't refer to a symbolic link, an error
+ should be returned.
+
+ @param path: path (relative or absolute) of the symbolic link.
+ @type path: str
+ @return: the target path of the symbolic link, or an error code like
+ L{SFTP_NO_SUCH_FILE}.
+ @rtype: str I{or error code}
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def symlink(self, target_path, path):
+ """
+ Create a symbolic link on the server, as new pathname C{path},
+ with C{target_path} as the target of the link.
+
+ @param target_path: path (relative or absolute) of the target for
+ this new symbolic link.
+ @type target_path: str
+ @param path: path (relative or absolute) of the symbolic link to
+ create.
+ @type path: str
+ @return: an error code like C{SFTP_OK}.
+ @rtype: int
+ """
+ return SFTP_OP_UNSUPPORTED
diff --git a/paramiko/ssh_exception.py b/paramiko/ssh_exception.py
new file mode 100644
index 0000000..900d4a0
--- /dev/null
+++ b/paramiko/ssh_exception.py
@@ -0,0 +1,69 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+ # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Exceptions defined by paramiko.
+"""
+
+
+class SSHException (Exception):
+ """
+ Exception raised by failures in SSH2 protocol negotiation or logic errors.
+ """
+ pass
+
+
+class PasswordRequiredException (SSHException):
+ """
+ Exception raised when a password is needed to unlock a private key file.
+ """
+ pass
+
+
+class BadAuthenticationType (SSHException):
+ """
+ Exception raised when an authentication type (like password) is used, but
+ the server isn't allowing that type. (It may only allow public-key, for
+ example.)
+
+ @ivar allowed_types: list of allowed authentication types provided by the
+ server (possible values are: C{"none"}, C{"password"}, and
+ C{"publickey"}).
+ @type allowed_types: list
+
+ @since: 1.1
+ """
+ allowed_types = []
+
+ def __init__(self, explanation, types):
+ SSHException.__init__(self, explanation)
+ self.allowed_types = types
+
+ def __str__(self):
+ return SSHException.__str__(self) + ' (allowed_types=%r)' % self.allowed_types
+
+
+class PartialAuthentication (SSHException):
+ """
+ An internal exception thrown in the case of partial authentication.
+ """
+ allowed_types = []
+
+ def __init__(self, types):
+ SSHException.__init__(self, 'partial authentication')
+ self.allowed_types = types
diff --git a/paramiko/transport.py b/paramiko/transport.py
new file mode 100644
index 0000000..8714a96
--- /dev/null
+++ b/paramiko/transport.py
@@ -0,0 +1,1824 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+ # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+L{Transport} handles the core SSH2 protocol.
+"""
+
+import os
+import socket
+import string
+import struct
+import sys
+import threading
+import time
+import weakref
+
+from paramiko import util
+from paramiko.common import *
+from paramiko.compress import ZlibCompressor, ZlibDecompressor
+from paramiko.ssh_exception import SSHException, BadAuthenticationType
+from paramiko.message import Message
+from paramiko.channel import Channel
+from paramiko.sftp_client import SFTPClient
+from paramiko.packet import Packetizer, NeedRekeyException
+from paramiko.rsakey import RSAKey
+from paramiko.dsskey import DSSKey
+from paramiko.kex_group1 import KexGroup1
+from paramiko.kex_gex import KexGex
+from paramiko.primes import ModulusPack
+from paramiko.auth_handler import AuthHandler
+
+# these come from PyCrypt
+# http://www.amk.ca/python/writing/pycrypt/
+ # I believe this is on the standards track.
+# PyCrypt compiled for Win32 can be downloaded from the HashTar homepage:
+# http://nitace.bsd.uchicago.edu:8080/hashtar
+from Crypto.Cipher import Blowfish, AES, DES3
+from Crypto.Hash import SHA, MD5, HMAC
+
+
+# for thread cleanup
+_active_threads = []
+def _join_lingering_threads():
+ for thr in _active_threads:
+ thr.stop_thread()
+import atexit
+atexit.register(_join_lingering_threads)
+
+
+class SecurityOptions (object):
+ """
+ Simple object containing the security preferences of an ssh transport.
+ These are tuples of acceptable ciphers, digests, key types, and key
+ exchange algorithms, listed in order of preference.
+
+ Changing the contents and/or order of these fields affects the underlying
+ L{Transport} (but only if you change them before starting the session).
+ If you try to add an algorithm that paramiko doesn't recognize,
+ C{ValueError} will be raised. If you try to assign something besides a
+ tuple to one of the fields, C{TypeError} will be raised.
+
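+ For example, to restrict a transport to AES ciphers before starting it
+ (a sketch; C{sock} stands for any connected socket or address tuple)::
+
+     t = Transport(sock)
+     opts = t.get_security_options()
+     opts.ciphers = ('aes256-cbc', 'aes128-cbc')
+     t.start_client()
+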
+ @since: ivysaur
+ """
+ __slots__ = [ 'ciphers', 'digests', 'key_types', 'kex', 'compression', '_transport' ]
+
+ def __init__(self, transport):
+ self._transport = transport
+
+ def __repr__(self):
+ """
+ Returns a string representation of this object, for debugging.
+
+ @rtype: str
+ """
+ return '<paramiko.SecurityOptions for %s>' % repr(self._transport)
+
+ def _get_ciphers(self):
+ return self._transport._preferred_ciphers
+
+ def _get_digests(self):
+ return self._transport._preferred_macs
+
+ def _get_key_types(self):
+ return self._transport._preferred_keys
+
+ def _get_kex(self):
+ return self._transport._preferred_kex
+
+ def _get_compression(self):
+ return self._transport._preferred_compression
+
+ def _set(self, name, orig, x):
+ if type(x) is list:
+ x = tuple(x)
+ if type(x) is not tuple:
+ raise TypeError('expected tuple or list')
+ possible = getattr(self._transport, orig).keys()
+ if len(filter(lambda n: n not in possible, x)) > 0:
+ raise ValueError('unknown cipher')
+ setattr(self._transport, name, x)
+
+ def _set_ciphers(self, x):
+ self._set('_preferred_ciphers', '_cipher_info', x)
+
+ def _set_digests(self, x):
+ self._set('_preferred_macs', '_mac_info', x)
+
+ def _set_key_types(self, x):
+ self._set('_preferred_keys', '_key_info', x)
+
+ def _set_kex(self, x):
+ self._set('_preferred_kex', '_kex_info', x)
+
+ def _set_compression(self, x):
+ self._set('_preferred_compression', '_compression_info', x)
+
+ ciphers = property(_get_ciphers, _set_ciphers, None,
+ "Symmetric encryption ciphers")
+ digests = property(_get_digests, _set_digests, None,
+ "Digest (one-way hash) algorithms")
+ key_types = property(_get_key_types, _set_key_types, None,
+ "Public-key algorithms")
+ kex = property(_get_kex, _set_kex, None, "Key exchange algorithms")
+ compression = property(_get_compression, _set_compression, None,
+ "Compression algorithms")
+
+
+class Transport (threading.Thread):
+ """
+ An SSH Transport attaches to a stream (usually a socket), negotiates an
+ encrypted session, authenticates, and then creates stream tunnels, called
+ L{Channel}s, across the session. Multiple channels can be multiplexed
+ across a single session (and often are, in the case of port forwardings).
+ """
+
+ _PROTO_ID = '2.0'
+ _CLIENT_ID = 'paramiko_1.5.2'
+
+ _preferred_ciphers = ( 'aes128-cbc', 'blowfish-cbc', 'aes256-cbc', '3des-cbc' )
+ _preferred_macs = ( 'hmac-sha1', 'hmac-md5', 'hmac-sha1-96', 'hmac-md5-96' )
+ _preferred_keys = ( 'ssh-rsa', 'ssh-dss' )
+ _preferred_kex = ( 'diffie-hellman-group1-sha1', 'diffie-hellman-group-exchange-sha1' )
+ _preferred_compression = ( 'none', )
+
+ _cipher_info = {
+ 'blowfish-cbc': { 'class': Blowfish, 'mode': Blowfish.MODE_CBC, 'block-size': 8, 'key-size': 16 },
+ 'aes128-cbc': { 'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 16 },
+ 'aes256-cbc': { 'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 32 },
+ '3des-cbc': { 'class': DES3, 'mode': DES3.MODE_CBC, 'block-size': 8, 'key-size': 24 },
+ }
+
+ _mac_info = {
+ 'hmac-sha1': { 'class': SHA, 'size': 20 },
+ 'hmac-sha1-96': { 'class': SHA, 'size': 12 },
+ 'hmac-md5': { 'class': MD5, 'size': 16 },
+ 'hmac-md5-96': { 'class': MD5, 'size': 12 },
+ }
+
+ _key_info = {
+ 'ssh-rsa': RSAKey,
+ 'ssh-dss': DSSKey,
+ }
+
+ _kex_info = {
+ 'diffie-hellman-group1-sha1': KexGroup1,
+ 'diffie-hellman-group-exchange-sha1': KexGex,
+ }
+
+ _compression_info = {
+ # zlib@openssh.com is just zlib, but only turned on after a successful
+ # authentication. openssh servers may only offer this type because
+ # they've had troubles with security holes in zlib in the past.
+ 'zlib@openssh.com': ( ZlibCompressor, ZlibDecompressor ),
+ 'zlib': ( ZlibCompressor, ZlibDecompressor ),
+ 'none': ( None, None ),
+ }
+
+
+ _modulus_pack = None
+
+ def __init__(self, sock):
+ """
+ Create a new SSH session over an existing socket, or socket-like
+ object. This only creates the Transport object; it doesn't begin the
+ SSH session yet. Use L{connect} or L{start_client} to begin a client
+ session, or L{start_server} to begin a server session.
+
+ If the object is not actually a socket, it must have the following
+ methods:
+ - C{send(str)}: Writes from 1 to C{len(str)} bytes, and
+ returns an int representing the number of bytes written. Returns
+ 0 or raises C{EOFError} if the stream has been closed.
+ - C{recv(int)}: Reads from 1 to C{int} bytes and returns them as a
+ string. Returns 0 or raises C{EOFError} if the stream has been
+ closed.
+ - C{close()}: Closes the socket.
+ - C{settimeout(n)}: Sets a (float) timeout on I/O operations.
+
+ For ease of use, you may also pass in an address (as a tuple) or a host
+ string as the C{sock} argument. (A host string is a hostname with an
+ optional port (separated by C{":"}) which will be converted into a
+ tuple of C{(hostname, port)}.) A socket will be connected to this
+ address and used for communication. Exceptions from the C{socket} call
+ may be thrown in this case.
+
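+ For example, the following are equivalent ways of creating a session to
+ the same (placeholder) host::
+
+     t = Transport(('example.com', 22))
+     t = Transport('example.com:22')
+
+     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+     sock.connect(('example.com', 22))
+     t = Transport(sock)
+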
+ @param sock: a socket or socket-like object to create the session over.
+ @type sock: socket
+ """
+ if type(sock) is str:
+ # convert "host:port" into (host, port)
+ hl = sock.split(':', 1)
+ if len(hl) == 1:
+ sock = (hl[0], 22)
+ else:
+ sock = (hl[0], int(hl[1]))
+ if type(sock) is tuple:
+ # connect to the given (host, port)
+ hostname, port = sock
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.connect((hostname, port))
+ # okay, normal socket-ish flow here...
+ threading.Thread.__init__(self)
+ self.randpool = randpool
+ self.sock = sock
+ # Python < 2.3 doesn't have the settimeout method - RogerB
+ try:
+ # we set the timeout so we can check self.active periodically to
+ # see if we should bail. socket.timeout exception is never
+ # propagated.
+ self.sock.settimeout(0.1)
+ except AttributeError:
+ pass
+ # negotiated crypto parameters
+ self.packetizer = Packetizer(sock)
+ self.local_version = 'SSH-' + self._PROTO_ID + '-' + self._CLIENT_ID
+ self.remote_version = ''
+ self.local_cipher = self.remote_cipher = ''
+ self.local_kex_init = self.remote_kex_init = None
+ self.session_id = None
+ # /negotiated crypto parameters
+ self.expected_packet = 0
+ self.active = False
+ self.initial_kex_done = False
+ self.in_kex = False
+ self.lock = threading.Lock() # synchronization (always higher level than write_lock)
+ self.channels = weakref.WeakValueDictionary() # (id -> Channel)
+ self.channel_events = { } # (id -> Event)
+ self.channels_seen = { } # (id -> True)
+ self.channel_counter = 1
+ self.window_size = 65536
+ self.max_packet_size = 34816
+ self.saved_exception = None
+ self.clear_to_send = threading.Event()
+ self.clear_to_send_lock = threading.Lock()
+ self.log_name = 'paramiko.transport'
+ self.logger = util.get_logger(self.log_name)
+ self.packetizer.set_log(self.logger)
+ self.auth_handler = None
+ self.authenticated = False
+ # user-defined event callbacks:
+ self.completion_event = None
+ # server mode:
+ self.server_mode = False
+ self.server_object = None
+ self.server_key_dict = { }
+ self.server_accepts = [ ]
+ self.server_accept_cv = threading.Condition(self.lock)
+ self.subsystem_table = { }
+
+ def __del__(self):
+ self.close()
+
+ def __repr__(self):
+ """
+ Returns a string representation of this object, for debugging.
+
+ @rtype: str
+ """
+ out = '<paramiko.Transport at %s' % hex(long(id(self)) & 0xffffffffL)
+ if not self.active:
+ out += ' (unconnected)'
+ else:
+ if self.local_cipher != '':
+ out += ' (cipher %s, %d bits)' % (self.local_cipher,
+ self._cipher_info[self.local_cipher]['key-size'] * 8)
+ if self.is_authenticated():
+ if len(self.channels) == 1:
+ out += ' (active; 1 open channel)'
+ else:
+ out += ' (active; %d open channels)' % len(self.channels)
+ elif self.initial_kex_done:
+ out += ' (connected; awaiting auth)'
+ else:
+ out += ' (connecting)'
+ out += '>'
+ return out
+
+ def get_security_options(self):
+ """
+ Return a L{SecurityOptions} object which can be used to tweak the
+ encryption algorithms this transport will permit, and the order of
+ preference for them.
+
+ @return: an object that can be used to change the preferred algorithms
+ for encryption, digest (hash), public key, and key exchange.
+ @rtype: L{SecurityOptions}
+
+ @since: ivysaur
+ """
+ return SecurityOptions(self)
+
+ def start_client(self, event=None):
+ """
+ Negotiate a new SSH2 session as a client. This is the first step after
+ creating a new L{Transport}. A separate thread is created for protocol
+ negotiation.
+
+ If an event is passed in, this method returns immediately. When
+ negotiation is done (successful or not), the given C{Event} will
+ be triggered. On failure, L{is_active} will return C{False}.
+
+ (Since 1.4) If C{event} is C{None}, this method will not return until
+ negotiation is done. On success, the method returns normally.
+ Otherwise an SSHException is raised.
+
+ After a successful negotiation, you will usually want to authenticate,
+ calling L{auth_password <Transport.auth_password>} or
+ L{auth_publickey <Transport.auth_publickey>}.
+
+ @note: L{connect} is a simpler method for connecting as a client.
+
+ @note: After calling this method (or L{start_server} or L{connect}),
+ you should no longer directly read from or write to the original
+ socket object.
+
+ @param event: an event to trigger when negotiation is complete
+ (optional)
+ @type event: threading.Event
+
+ @raise SSHException: if negotiation fails (and no C{event} was passed
+ in)
+ """
+ self.active = True
+ if event is not None:
+ # async, return immediately and let the app poll for completion
+ self.completion_event = event
+ self.start()
+ return
+
+ # synchronous, wait for a result
+ self.completion_event = event = threading.Event()
+ self.start()
+ while True:
+ event.wait(0.1)
+ if not self.active:
+ e = self.get_exception()
+ if e is not None:
+ raise e
+ raise SSHException('Negotiation failed.')
+ if event.isSet():
+ break
+
+ def start_server(self, event=None, server=None):
+ """
+ Negotiate a new SSH2 session as a server. This is the first step after
+ creating a new L{Transport} and setting up your server host key(s). A
+ separate thread is created for protocol negotiation.
+
+ If an event is passed in, this method returns immediately. When
+ negotiation is done (successful or not), the given C{Event} will
+ be triggered. On failure, L{is_active} will return C{False}.
+
+ (Since 1.4) If C{event} is C{None}, this method will not return until
+ negotiation is done. On success, the method returns normally.
+ Otherwise an SSHException is raised.
+
+ After a successful negotiation, the client will need to authenticate.
+ Override the methods
+ L{get_allowed_auths <ServerInterface.get_allowed_auths>},
+ L{check_auth_none <ServerInterface.check_auth_none>},
+ L{check_auth_password <ServerInterface.check_auth_password>}, and
+ L{check_auth_publickey <ServerInterface.check_auth_publickey>} in the
+ given C{server} object to control the authentication process.
+
+ After a successful authentication, the client should request to open
+ a channel. Override
+ L{check_channel_request <ServerInterface.check_channel_request>} in the
+ given C{server} object to allow channels to be opened.
+
+ @note: After calling this method (or L{start_client} or L{connect}),
+ you should no longer directly read from or write to the original
+ socket object.
+
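+ A rough sketch of a server setup is shown below; C{client_sock} is an
+ already-accepted socket, C{MyServer} is a hypothetical
+ L{ServerInterface <server.ServerInterface>} subclass, and the host key
+ path is a placeholder::
+
+     t = Transport(client_sock)
+     t.add_server_key(RSAKey(filename='/etc/ssh/ssh_host_rsa_key'))
+     t.start_server(server=MyServer())
+     chan = t.accept(timeout=20)
+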
+ @param event: an event to trigger when negotiation is complete.
+ @type event: threading.Event
+ @param server: an object used to perform authentication and create
+ L{Channel}s.
+ @type server: L{server.ServerInterface}
+
+ @raise SSHException: if negotiation fails (and no C{event} was passed
+ in)
+ """
+ if server is None:
+ server = ServerInterface()
+ self.server_mode = True
+ self.server_object = server
+ self.active = True
+ if event is not None:
+ # async, return immediately and let the app poll for completion
+ self.completion_event = event
+ self.start()
+ return
+
+ # synchronous, wait for a result
+ self.completion_event = event = threading.Event()
+ self.start()
+ while True:
+ event.wait(0.1)
+ if not self.active:
+ e = self.get_exception()
+ if e is not None:
+ raise e
+ raise SSHException('Negotiation failed.')
+ if event.isSet():
+ break
+
+ def add_server_key(self, key):
+ """
+ Add a host key to the list of keys used for server mode. When behaving
+ as a server, the host key is used to sign certain packets during the
+ SSH2 negotiation, so that the client can trust that we are who we say
+ we are. Because this is used for signing, the key must contain private
+ key info, not just the public half. Only one key of each type (RSA or
+ DSS) is kept.
+
+ @param key: the host key to add, usually an L{RSAKey <rsakey.RSAKey>} or
+ L{DSSKey <dsskey.DSSKey>}.
+ @type key: L{PKey <pkey.PKey>}
+ """
+ self.server_key_dict[key.get_name()] = key
+
+ def get_server_key(self):
+ """
+ Return the active host key, in server mode. After negotiating with the
+ client, this method will return the negotiated host key. If only one
+ type of host key was set with L{add_server_key}, that's the only key
+ that will ever be returned. But in cases where you have set more than
+ one type of host key (for example, an RSA key and a DSS key), the key
+ type will be negotiated by the client, and this method will return the
+ key of the type agreed on. If the host key has not been negotiated
+ yet, C{None} is returned. In client mode, the behavior is undefined.
+
+ @return: host key of the type negotiated by the client, or C{None}.
+ @rtype: L{PKey <pkey.PKey>}
+ """
+ try:
+ return self.server_key_dict[self.host_key_type]
+ except KeyError:
+ return None
+
+ def load_server_moduli(filename=None):
+ """
+ I{(optional)}
+ Load a file of prime moduli for use in doing group-exchange key
+ negotiation in server mode. It's a rather obscure option and can be
+ safely ignored.
+
+ In server mode, the remote client may request "group-exchange" key
+ negotiation, which asks the server to send a random prime number that
+ fits certain criteria. These primes are pretty difficult to compute,
+ so they can't be generated on demand. But many systems contain a file
+ of suitable primes (usually named something like C{/etc/ssh/moduli}).
+ If you call C{load_server_moduli} and it returns C{True}, then this
+ file of primes has been loaded and we will support "group-exchange" in
+ server mode. Otherwise server mode will just claim that it doesn't
+ support that method of key negotiation.
+
+ @param filename: optional path to the moduli file, if you happen to
+ know that it's not in a standard location.
+ @type filename: str
+ @return: True if a moduli file was successfully loaded; False
+ otherwise.
+ @rtype: bool
+
+ @since: doduo
+
+ @note: This has no effect when used in client mode.
+ """
+ Transport._modulus_pack = ModulusPack(randpool)
+ # places to look for the openssh "moduli" file
+ file_list = [ '/etc/ssh/moduli', '/usr/local/etc/moduli' ]
+ if filename is not None:
+ file_list.insert(0, filename)
+ for fn in file_list:
+ try:
+ Transport._modulus_pack.read_file(fn)
+ return True
+ except IOError:
+ pass
+ # none succeeded
+ Transport._modulus_pack = None
+ return False
+ load_server_moduli = staticmethod(load_server_moduli)
+
+ def close(self):
+ """
+ Close this session, and any open channels that are tied to it.
+ """
+ self.active = False
+ # since this may be called from __del__, can't assume any attributes exist
+ try:
+ self.packetizer.close()
+ for chan in self.channels.values():
+ chan._unlink()
+ except AttributeError:
+ pass
+
+ def get_remote_server_key(self):
+ """
+ Return the host key of the server (in client mode).
+
+ @note: Previously this call returned a tuple of (key type, key string).
+ You can get the same effect by calling
+ L{PKey.get_name <pkey.PKey.get_name>} for the key type, and
+ C{str(key)} for the key string.
+
+ @raise SSHException: if no session is currently active.
+
+ @return: public key of the remote server.
+ @rtype: L{PKey <pkey.PKey>}
+ """
+ if (not self.active) or (not self.initial_kex_done):
+ raise SSHException('No existing session')
+ return self.host_key
+
+ def is_active(self):
+ """
+ Return true if this session is active (open).
+
+ @return: True if the session is still active (open); False if the
+ session is closed.
+ @rtype: bool
+ """
+ return self.active
+
+ def open_session(self):
+ """
+ Request a new channel to the server, of type C{"session"}. This
+ is just an alias for C{open_channel('session')}.
+
+ @return: a new L{Channel} on success, or C{None} if the request is
+ rejected or the session ends prematurely.
+ @rtype: L{Channel}
+ """
+ return self.open_channel('session')
+
+ def open_channel(self, kind, dest_addr=None, src_addr=None):
+ """
+ Request a new channel to the server. L{Channel}s are socket-like
+ objects used for the actual transfer of data across the session.
+ You may only request a channel after negotiating encryption (using
+ L{connect} or L{start_client}) and authenticating.
+
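+ For example, to tunnel a TCP connection through the server (the
+ addresses below are placeholders)::
+
+     chan = t.open_channel('direct-tcpip',
+                           dest_addr=('10.0.0.1', 80),
+                           src_addr=('127.0.0.1', 0))
+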
+ @param kind: the kind of channel requested (usually C{"session"},
+ C{"forwarded-tcpip"} or C{"direct-tcpip"}).
+ @type kind: str
+ @param dest_addr: the destination address of this port forwarding,
+ if C{kind} is C{"forwarded-tcpip"} or C{"direct-tcpip"} (ignored
+ for other channel types).
+ @type dest_addr: (str, int)
+ @param src_addr: the source address of this port forwarding, if
+ C{kind} is C{"forwarded-tcpip"} or C{"direct-tcpip"}.
+ @type src_addr: (str, int)
+ @return: a new L{Channel} on success, or C{None} if the request is
+ rejected or the session ends prematurely.
+ @rtype: L{Channel}
+ """
+ chan = None
+ if not self.active:
+ # don't bother trying to allocate a channel
+ return None
+ self.lock.acquire()
+ try:
+ chanid = self.channel_counter
+ while self.channels.has_key(chanid):
+ self.channel_counter = (self.channel_counter + 1) & 0xffffff
+ chanid = self.channel_counter
+ self.channel_counter = (self.channel_counter + 1) & 0xffffff
+ m = Message()
+ m.add_byte(chr(MSG_CHANNEL_OPEN))
+ m.add_string(kind)
+ m.add_int(chanid)
+ m.add_int(self.window_size)
+ m.add_int(self.max_packet_size)
+ if (kind == 'forwarded-tcpip') or (kind == 'direct-tcpip'):
+ m.add_string(dest_addr[0])
+ m.add_int(dest_addr[1])
+ m.add_string(src_addr[0])
+ m.add_int(src_addr[1])
+ self.channels[chanid] = chan = Channel(chanid)
+ self.channel_events[chanid] = event = threading.Event()
+ self.channels_seen[chanid] = True
+ chan._set_transport(self)
+ chan._set_window(self.window_size, self.max_packet_size)
+ finally:
+ self.lock.release()
+ self._send_user_message(m)
+ while 1:
+ event.wait(0.1)
+ if not self.active:
+ return None
+ if event.isSet():
+ break
+ try:
+ self.lock.acquire()
+ if not self.channels.has_key(chanid):
+ chan = None
+ finally:
+ self.lock.release()
+ return chan
+
+ def open_sftp_client(self):
+ """
+ Create an SFTP client channel from an open transport. On success,
+ an SFTP session will be opened with the remote host, and a new
+ SFTPClient object will be returned.
+
+ @return: a new L{SFTPClient} object, referring to an sftp session
+ (channel) across this transport
+ @rtype: L{SFTPClient}
+ """
+ return SFTPClient.from_transport(self)
+
+ def send_ignore(self, bytes=None):
+ """
+ Send a junk packet across the encrypted link. This is sometimes used
+ to add "noise" to a connection to confuse would-be attackers. It can
+ also be used as a keep-alive for long lived connections traversing
+ firewalls.
+
+ @param bytes: the number of random bytes to send in the payload of the
+ ignored packet -- defaults to a random number from 10 to 41.
+ @type bytes: int
+
+ @since: fearow
+ """
+ m = Message()
+ m.add_byte(chr(MSG_IGNORE))
+ randpool.stir()
+ if bytes is None:
+ bytes = (ord(randpool.get_bytes(1)) % 32) + 10
+ m.add_bytes(randpool.get_bytes(bytes))
+ self._send_user_message(m)
+
+ def renegotiate_keys(self):
+ """
+ Force this session to switch to new keys. Normally this is done
+ automatically after the session hits a certain number of packets or
+ bytes sent or received, but this method gives you the option of forcing
+ new keys whenever you want. Negotiating new keys causes a pause in
+ traffic both ways as the two sides swap keys and do computations. This
+ method returns when the session has switched to new keys, or the
+ session has died mid-negotiation.
+
+ @return: True if the renegotiation was successful, and the link is
+ using new keys; False if the session dropped during renegotiation.
+ @rtype: bool
+ """
+ self.completion_event = threading.Event()
+ self._send_kex_init()
+ while 1:
+ self.completion_event.wait(0.1)
+ if not self.active:
+ return False
+ if self.completion_event.isSet():
+ break
+ return True
+
+ def set_keepalive(self, interval):
+ """
+ Turn on/off keepalive packets (default is off). If this is set, after
+ C{interval} seconds without sending any data over the connection, a
+ "keepalive" packet will be sent (and ignored by the remote host). This
+ can be useful to keep connections alive over a NAT, for example.
+
+ @param interval: seconds to wait before sending a keepalive packet (or
+ 0 to disable keepalives).
+ @type interval: int
+
+ @since: fearow
+ """
+ self.packetizer.set_keepalive(interval,
+ lambda x=self: x.global_request('keepalive@lag.net', wait=False))
+
+ def global_request(self, kind, data=None, wait=True):
+ """
+ Make a global request to the remote host. These are normally
+ extensions to the SSH2 protocol.
+
+ @param kind: name of the request.
+ @type kind: str
+ @param data: an optional tuple containing additional data to attach
+ to the request.
+ @type data: tuple
+ @param wait: C{True} if this method should not return until a response
+ is received; C{False} otherwise.
+ @type wait: bool
+ @return: a L{Message} containing possible additional data if the
+ request was successful (or an empty L{Message} if C{wait} was
+ C{False}); C{None} if the request was denied.
+ @rtype: L{Message}
+
+ @since: fearow
+ """
+ if wait:
+ self.completion_event = threading.Event()
+ m = Message()
+ m.add_byte(chr(MSG_GLOBAL_REQUEST))
+ m.add_string(kind)
+ m.add_boolean(wait)
+ if data is not None:
+ m.add(*data)
+ self._log(DEBUG, 'Sending global request "%s"' % kind)
+ self._send_user_message(m)
+ if not wait:
+ return None
+ while True:
+ self.completion_event.wait(0.1)
+ if not self.active:
+ return None
+ if self.completion_event.isSet():
+ break
+ return self.global_response
+
+ def accept(self, timeout=None):
+ """
+ Return the next channel opened by the client over this transport, in
+ server mode. If no channel is opened before the given timeout, C{None}
+ is returned.
+
+ @param timeout: seconds to wait for a channel, or C{None} to wait
+ forever
+ @type timeout: int
+ @return: a new Channel opened by the client
+ @rtype: L{Channel}
+ """
+ self.lock.acquire()
+ try:
+ if len(self.server_accepts) > 0:
+ chan = self.server_accepts.pop(0)
+ else:
+ self.server_accept_cv.wait(timeout)
+ if len(self.server_accepts) > 0:
+ chan = self.server_accepts.pop(0)
+ else:
+ # timeout
+ chan = None
+ finally:
+ self.lock.release()
+ return chan
+
+ def connect(self, hostkey=None, username='', password=None, pkey=None):
+ """
+ Negotiate an SSH2 session, and optionally verify the server's host key
+ and authenticate using a password or private key. This is a shortcut
+ for L{start_client}, L{get_remote_server_key}, and
+ L{Transport.auth_password} or L{Transport.auth_publickey}. Use those
+ methods if you want more control.
+
+ You can use this method immediately after creating a Transport to
+ negotiate encryption with a server. If it fails, an exception will be
+ thrown. On success, the method will return cleanly, and an encrypted
+ session exists. You may immediately call L{open_channel} or
+ L{open_session} to get a L{Channel} object, which is used for data
+ transfer.
+
+ @note: If you fail to supply a password or private key, this method may
+ succeed, but a subsequent L{open_channel} or L{open_session} call may
+ fail because you haven't authenticated yet.
+
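+ A typical client session might look like this sketch (the host,
+ username, and password are placeholders, and host key checking is
+ skipped for brevity)::
+
+     t = Transport(('example.com', 22))
+     t.connect(username='user', password='secret')
+     sftp = t.open_sftp_client()
+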
+ @param hostkey: the host key expected from the server, or C{None} if
+ you don't want to do host key verification.
+ @type hostkey: L{PKey<pkey.PKey>}
+ @param username: the username to authenticate as.
+ @type username: str
+ @param password: a password to use for authentication, if you want to
+ use password authentication; otherwise C{None}.
+ @type password: str
+ @param pkey: a private key to use for authentication, if you want to
+ use private key authentication; otherwise C{None}.
+ @type pkey: L{PKey<pkey.PKey>}
+
+ @raise SSHException: if the SSH2 negotiation fails, the host key
+ supplied by the server is incorrect, or authentication fails.
+
+ @since: doduo
+ """
+ if hostkey is not None:
+ self._preferred_keys = [ hostkey.get_name() ]
+
+ self.start_client()
+
+ # check host key if we were given one
+ if (hostkey is not None):
+ key = self.get_remote_server_key()
+ if (key.get_name() != hostkey.get_name()) or (str(key) != str(hostkey)):
+ self._log(DEBUG, 'Bad host key from server')
+ self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(str(hostkey))))
+ self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(str(key))))
+ raise SSHException('Bad host key from server')
+ self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name())
+
+ if (pkey is not None) or (password is not None):
+ if password is not None:
+ self._log(DEBUG, 'Attempting password auth...')
+ self.auth_password(username, password)
+ else:
+ self._log(DEBUG, 'Attempting public-key auth...')
+ self.auth_publickey(username, pkey)
+
+ return
+
+ def get_exception(self):
+ """
+ Return any exception that happened during the last server request.
+ This can be used to fetch more specific error information after using
+ calls like L{start_client}. The exception (if any) is cleared after
+ this call.
+
+ @return: an exception, or C{None} if there is no stored exception.
+ @rtype: Exception
+
+ @since: 1.1
+ """
+ self.lock.acquire()
+ try:
+ e = self.saved_exception
+ self.saved_exception = None
+ return e
+ finally:
+ self.lock.release()
+
+ def set_subsystem_handler(self, name, handler, *larg, **kwarg):
+ """
+ Set the handler class for a subsystem in server mode. If a request
+ for this subsystem is made on an open ssh channel later, this handler
+ will be constructed and called -- see L{SubsystemHandler} for more
+ detailed documentation.
+
+ Any extra parameters (including keyword arguments) are saved and
+ passed to the L{SubsystemHandler} constructor later.
+
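+ For example, SFTP service is usually wired up by handing the built-in
+ L{SFTPServer} your own L{SFTPServerInterface} subclass (here a
+ hypothetical C{MySFTPServer}); roughly::
+
+     t.set_subsystem_handler('sftp', SFTPServer, MySFTPServer)
+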
+ @param name: name of the subsystem.
+ @type name: str
+ @param handler: subclass of L{SubsystemHandler} that handles this
+ subsystem.
+ @type handler: class
+ """
+ try:
+ self.lock.acquire()
+ self.subsystem_table[name] = (handler, larg, kwarg)
+ finally:
+ self.lock.release()
+
+ def is_authenticated(self):
+ """
+ Return true if this session is active and authenticated.
+
+ @return: True if the session is still open and has been authenticated
+ successfully; False if authentication failed and/or the session is
+ closed.
+ @rtype: bool
+ """
+ return self.active and (self.auth_handler is not None) and self.auth_handler.is_authenticated()
+
+ def get_username(self):
+ """
+ Return the username this connection is authenticated for. If the
+ session is not authenticated (or authentication failed), this method
+ returns C{None}.
+
+ @return: username that was authenticated, or C{None}.
+ @rtype: string
+
+ @since: fearow
+ """
+ if not self.active or (self.auth_handler is None):
+ return None
+ return self.auth_handler.get_username()
+
+ def auth_none(self, username):
+ """
+ Try to authenticate to the server using no authentication at all.
+ This will almost always fail. It may be useful for determining the
+ list of authentication types supported by the server, by catching the
+ L{BadAuthenticationType} exception raised.
+
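+ For example, to discover which authentication methods a server will
+ accept for a user (a sketch)::
+
+     try:
+         t.auth_none('user')
+     except BadAuthenticationType, e:
+         print e.allowed_types    # e.g. ['publickey', 'password']
+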
+ @param username: the username to authenticate as
+ @type username: string
+ @return: list of auth types permissible for the next stage of
+ authentication (normally empty)
+ @rtype: list
+
+ @raise BadAuthenticationType: if "none" authentication isn't allowed
+ by the server for this user
+ @raise SSHException: if the authentication failed due to a network
+ error
+
+ @since: 1.5
+ """
+ if (not self.active) or (not self.initial_kex_done):
+ raise SSHException('No existing session')
+ my_event = threading.Event()
+ self.auth_handler = AuthHandler(self)
+ self.auth_handler.auth_none(username, my_event)
+ return self.auth_handler.wait_for_response(my_event)
+
+ def auth_password(self, username, password, event=None, fallback=True):
+ """
+ Authenticate to the server using a password. The username and password
+ are sent over an encrypted link.
+
+ If an C{event} is passed in, this method will return immediately, and
+ the event will be triggered once authentication succeeds or fails. On
+ success, L{is_authenticated} will return C{True}. On failure, you may
+ use L{get_exception} to get more detailed error information.
+
+ Since 1.1, if no event is passed, this method will block until the
+ authentication succeeds or fails. On failure, an exception is raised.
+ Otherwise, the method simply returns.
+
+ Since 1.5, if no event is passed and C{fallback} is C{True} (the
+ default), if the server doesn't support plain password authentication
+ but does support so-called "keyboard-interactive" mode, an attempt
+ will be made to authenticate using this interactive mode. If it fails,
+ the normal exception will be thrown as if the attempt had never been
+ made. This is useful for some recent Gentoo and Debian distributions,
+ which turn off plain password authentication in a misguided belief
+ that interactive authentication is "more secure". (It's not.)
+
+ If the server requires multi-step authentication (which is very rare),
+ this method will return a list of auth types permissible for the next
+ step. Otherwise, in the normal case, an empty list is returned.
+
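+ For example, to start a password authentication without blocking (a
+ sketch, assuming C{threading} is imported)::
+
+     done = threading.Event()
+     t.auth_password('user', 'secret', event=done)
+     # ... do other work, then wait for the result:
+     done.wait()
+     if not t.is_authenticated():
+         raise SSHException('Authentication failed: %r' % t.get_exception())
+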
+ @param username: the username to authenticate as
+ @type username: string
+ @param password: the password to authenticate with
+ @type password: string
+ @param event: an event to trigger when the authentication attempt is
+ complete (whether it was successful or not)
+ @type event: threading.Event
+ @param fallback: C{True} if an attempt at an automated "interactive"
+ password auth should be made if the server doesn't support normal
+ password auth
+ @type fallback: bool
+ @return: list of auth types permissible for the next stage of
+ authentication (normally empty)
+ @rtype: list
+
+ @raise BadAuthenticationType: if password authentication isn't
+ allowed by the server for this user (and no event was passed in)
+ @raise SSHException: if the authentication failed (and no event was
+ passed in)
+ """
+ if (not self.active) or (not self.initial_kex_done):
+ # we should never try to send the password unless we're on a secure link
+ raise SSHException('No existing session')
+ if event is None:
+ my_event = threading.Event()
+ else:
+ my_event = event
+ self.auth_handler = AuthHandler(self)
+ self.auth_handler.auth_password(username, password, my_event)
+ if event is not None:
+ # caller wants to wait for event themselves
+ return []
+ try:
+ return self.auth_handler.wait_for_response(my_event)
+ except BadAuthenticationType, x:
+ # if password auth isn't allowed, but keyboard-interactive *is*, try to fudge it
+ if not fallback or not 'keyboard-interactive' in x.allowed_types:
+ raise
+ try:
+ def handler(title, instructions, fields):
+ if len(fields) > 1:
+ raise SSHException('Fallback authentication failed.')
+ if len(fields) == 0:
+ # for some reason, at least on os x, a 2nd request will
+ # be made with zero fields requested. maybe it's just
+ # to try to fake out automated scripting of the exact
+ # type we're doing here. *shrug* :)
+ return []
+ return [ password ]
+ return self.auth_interactive(username, handler)
+ except SSHException, ignored:
+ # attempt failed; just raise the original exception
+ raise x
+
+ def auth_publickey(self, username, key, event=None):
+ """
+ Authenticate to the server using a private key. The key is used to
+ sign data from the server, so it must include the private part.
+
+ If an C{event} is passed in, this method will return immediately, and
+ the event will be triggered once authentication succeeds or fails. On
+ success, L{is_authenticated} will return C{True}. On failure, you may
+ use L{get_exception} to get more detailed error information.
+
+ Since 1.1, if no event is passed, this method will block until the
+ authentication succeeds or fails. On failure, an exception is raised.
+ Otherwise, the method simply returns.
+
+ If the server requires multi-step authentication (which is very rare),
+ this method will return a list of auth types permissible for the next
+ step. Otherwise, in the normal case, an empty list is returned.
+
+ @param username: the username to authenticate as
+ @type username: string
+ @param key: the private key to authenticate with
+ @type key: L{PKey <pkey.PKey>}
+ @param event: an event to trigger when the authentication attempt is
+ complete (whether it was successful or not)
+ @type event: threading.Event
+ @return: list of auth types permissible for the next stage of
+ authentication (normally empty).
+ @rtype: list
+
+ @raise BadAuthenticationType: if public-key authentication isn't
+ allowed by the server for this user (and no event was passed in).
+ @raise SSHException: if the authentication failed (and no event was
+ passed in).
+ """
+ if (not self.active) or (not self.initial_kex_done):
+ # we should never try to authenticate unless we're on a secure link
+ raise SSHException('No existing session')
+ if event is None:
+ my_event = threading.Event()
+ else:
+ my_event = event
+ self.auth_handler = AuthHandler(self)
+ self.auth_handler.auth_publickey(username, key, my_event)
+ if event is not None:
+ # caller wants to wait for event themselves
+ return []
+ return self.auth_handler.wait_for_response(my_event)
+
+ def auth_interactive(self, username, handler, submethods=''):
+ """
+ Authenticate to the server interactively. A handler is used to answer
+ arbitrary questions from the server. On many servers, this is just a
+ dumb wrapper around PAM.
+
+ This method will block until the authentication succeeds or fails,
+ periodically calling the handler asynchronously to get answers to
+ authentication questions. The handler may be called more than once
+ if the server continues to ask questions.
+
+ The handler is expected to be a callable that will handle calls of the
+ form: C{handler(title, instructions, prompt_list)}. The C{title} is
+ meant to be a dialog-window title, and the C{instructions} are user
+ instructions (both are strings). C{prompt_list} will be a list of
+ prompts, each prompt being a tuple of C{(str, bool)}. The string is
+ the prompt and the boolean indicates whether the user text should be
+ echoed.
+
+ A sample call would thus be:
+ C{handler('title', 'instructions', [('Password:', False)])}.
+
+ The handler should return a list or tuple of answers to the server's
+ questions.
+
+ If the server requires multi-step authentication (which is very rare),
+ this method will return a list of auth types permissible for the next
+ step. Otherwise, in the normal case, an empty list is returned.
+
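+ A trivial handler that answers every prompt with the same (placeholder)
+ password might look like this sketch::
+
+     def handler(title, instructions, prompt_list):
+         return [ 'secret' for prompt, echo in prompt_list ]
+
+     t.auth_interactive('user', handler)
+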
+ @param username: the username to authenticate as
+ @type username: string
+ @param handler: a handler for responding to server questions
+ @type handler: callable
+ @param submethods: a string list of desired submethods (optional)
+ @type submethods: str
+ @return: list of auth types permissible for the next stage of
+ authentication (normally empty).
+ @rtype: list
+
+ @raise BadAuthenticationType: if C{"keyboard-interactive"} authentication
+ isn't allowed by the server for this user
+ @raise SSHException: if the authentication failed
+
+ @since: 1.5
+ """
+ if (not self.active) or (not self.initial_kex_done):
+ # we should never try to authenticate unless we're on a secure link
+ raise SSHException('No existing session')
+ my_event = threading.Event()
+ self.auth_handler = AuthHandler(self)
+ self.auth_handler.auth_interactive(username, handler, my_event, submethods)
+ return self.auth_handler.wait_for_response(my_event)
+
+ def set_log_channel(self, name):
+ """
+ Set the channel for this transport's logging. The default is
+ C{"paramiko.transport"} but it can be set to anything you want.
+ (See the C{logging} module for more info.) SSH Channels will log
+ to a sub-channel of the one specified.
+
+ @param name: new channel name for logging.
+ @type name: str
+
+ @since: 1.1
+ """
+ self.log_name = name
+ self.logger = util.get_logger(name)
+
+ def get_log_channel(self):
+ """
+ Return the channel name used for this transport's logging.
+
+ @return: channel name.
+ @rtype: str
+
+ @since: 1.2
+ """
+ return self.log_name
+
+ def set_hexdump(self, hexdump):
+ """
+ Turn on/off logging a hex dump of protocol traffic at DEBUG level in
+ the logs. Normally you would want this off (which is the default),
+ but if you are debugging something, it may be useful.
+
+ @param hexdump: C{True} to log protocol traffic (in hex) to the log;
+ C{False} otherwise.
+ @type hexdump: bool
+ """
+ self.packetizer.set_hexdump(hexdump)
+
+ def get_hexdump(self):
+ """
+ Return C{True} if the transport is currently logging hex dumps of
+ protocol traffic.
+
+ @return: C{True} if hex dumps are being logged
+ @rtype: bool
+
+ @since: 1.4
+ """
+ return self.packetizer.get_hexdump()
+
+ def use_compression(self, compress=True):
+ """
+ Turn on/off compression. This will only have an effect before starting
+ the transport (i.e., before calling L{connect}, etc). By default,
+ compression is off since it negatively affects interactive sessions
+ and is not fully tested.
+
+ @param compress: C{True} to ask the remote client/server to compress
+ traffic; C{False} to refuse compression
+ @type compress: bool
+
+ @since: 1.5.2
+ """
+ if compress:
+ self._preferred_compression = ( 'zlib@openssh.com', 'zlib', 'none' )
+ else:
+ self._preferred_compression = ( 'none', )
+
+ def stop_thread(self):
+ self.active = False
+ self.packetizer.close()
+
+
+ ### internals...
+
+
+ def _log(self, level, msg):
+ if issubclass(type(msg), list):
+ for m in msg:
+ self.logger.log(level, m)
+ else:
+ self.logger.log(level, msg)
+
+ def _get_modulus_pack(self):
+ "used by KexGex to find primes for group exchange"
+ return self._modulus_pack
+
+ def _unlink_channel(self, chanid):
+ "used by a Channel to remove itself from the active channel list"
+ try:
+ self.lock.acquire()
+ if self.channels.has_key(chanid):
+ del self.channels[chanid]
+ finally:
+ self.lock.release()
+
+ def _send_message(self, data):
+ self.packetizer.send_message(data)
+
+ def _send_user_message(self, data):
+ """
+ send a message, but block if we're in key negotiation. this is used
+ for user-initiated requests.
+ """
+ while True:
+ self.clear_to_send.wait(0.1)
+ if not self.active:
+ self._log(DEBUG, 'Dropping user packet because connection is dead.')
+ return
+ self.clear_to_send_lock.acquire()
+ if self.clear_to_send.isSet():
+ break
+ self.clear_to_send_lock.release()
+ try:
+ self._send_message(data)
+ finally:
+ self.clear_to_send_lock.release()
+
+ def _set_K_H(self, k, h):
+ "used by a kex object to set the K (root key) and H (exchange hash)"
+ self.K = k
+ self.H = h
+ if self.session_id == None:
+ self.session_id = h
+
+ def _expect_packet(self, type):
+ "used by a kex object to register the next packet type it expects to see"
+ self.expected_packet = type
+
+ def _verify_key(self, host_key, sig):
+ key = self._key_info[self.host_key_type](Message(host_key))
+ if key is None:
+ raise SSHException('Unknown host key type')
+ if not key.verify_ssh_sig(self.H, Message(sig)):
+ raise SSHException('Signature verification (%s) failed. Boo. Robey should debug this.' % self.host_key_type)
+ self.host_key = key
+
+ def _compute_key(self, id, nbytes):
+ "id is 'A' - 'F' for the various keys used by ssh"
+ m = Message()
+ m.add_mpint(self.K)
+ m.add_bytes(self.H)
+ m.add_byte(id)
+ m.add_bytes(self.session_id)
+ out = sofar = SHA.new(str(m)).digest()
+ while len(out) < nbytes:
+ m = Message()
+ m.add_mpint(self.K)
+ m.add_bytes(self.H)
+ m.add_bytes(sofar)
+ hash = SHA.new(str(m)).digest()
+ out += hash
+ sofar += hash
+ return out[:nbytes]
+
+ def _get_cipher(self, name, key, iv):
+ if not self._cipher_info.has_key(name):
+ raise SSHException('Unknown client cipher ' + name)
+ return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv)
+
+ def run(self):
+ # (use the exposed "run" method, because if we specify a thread target
+ # of a private method, threading.Thread will keep a reference to it
+ # indefinitely, creating a GC cycle and not letting Transport ever be
+ # GC'd. it's a bug in Thread.)
+
+ # active=True occurs before the thread is launched, to avoid a race
+ _active_threads.append(self)
+ if self.server_mode:
+ self._log(DEBUG, 'starting thread (server mode): %s' % hex(long(id(self)) & 0xffffffffL))
+ else:
+ self._log(DEBUG, 'starting thread (client mode): %s' % hex(long(id(self)) & 0xffffffffL))
+ try:
+ self.packetizer.write_all(self.local_version + '\r\n')
+ self._check_banner()
+ self._send_kex_init()
+ self.expected_packet = MSG_KEXINIT
+
+ while self.active:
+ if self.packetizer.need_rekey() and not self.in_kex:
+ self._send_kex_init()
+ try:
+ ptype, m = self.packetizer.read_message()
+ except NeedRekeyException:
+ continue
+ if ptype == MSG_IGNORE:
+ continue
+ elif ptype == MSG_DISCONNECT:
+ self._parse_disconnect(m)
+ self.active = False
+ self.packetizer.close()
+ break
+ elif ptype == MSG_DEBUG:
+ self._parse_debug(m)
+ continue
+ if self.expected_packet != 0:
+ if ptype != self.expected_packet:
+ raise SSHException('Expecting packet %d, got %d' % (self.expected_packet, ptype))
+ self.expected_packet = 0
+ if (ptype >= 30) and (ptype <= 39):
+ self.kex_engine.parse_next(ptype, m)
+ continue
+
+ if self._handler_table.has_key(ptype):
+ self._handler_table[ptype](self, m)
+ elif self._channel_handler_table.has_key(ptype):
+ chanid = m.get_int()
+ if self.channels.has_key(chanid):
+ self._channel_handler_table[ptype](self.channels[chanid], m)
+ elif self.channels_seen.has_key(chanid):
+ self._log(DEBUG, 'Ignoring message for dead channel %d' % chanid)
+ else:
+ self._log(ERROR, 'Channel request for unknown channel %d' % chanid)
+ self.active = False
+ self.packetizer.close()
+ elif (self.auth_handler is not None) and self.auth_handler._handler_table.has_key(ptype):
+ self.auth_handler._handler_table[ptype](self.auth_handler, m)
+ else:
+ self._log(WARNING, 'Oops, unhandled type %d' % ptype)
+ msg = Message()
+ msg.add_byte(chr(MSG_UNIMPLEMENTED))
+ msg.add_int(m.seqno)
+ self._send_message(msg)
+ except SSHException, e:
+ self._log(ERROR, 'Exception: ' + str(e))
+ self._log(ERROR, util.tb_strings())
+ self.saved_exception = e
+ except EOFError, e:
+ self._log(DEBUG, 'EOF in transport thread')
+ #self._log(DEBUG, util.tb_strings())
+ self.saved_exception = e
+ except socket.error, e:
+ if type(e.args) is tuple:
+ emsg = '%s (%d)' % (e.args[1], e.args[0])
+ else:
+ emsg = e.args
+ self._log(ERROR, 'Socket exception: ' + emsg)
+ self.saved_exception = e
+ except Exception, e:
+ self._log(ERROR, 'Unknown exception: ' + str(e))
+ self._log(ERROR, util.tb_strings())
+ self.saved_exception = e
+ _active_threads.remove(self)
+ for chan in self.channels.values():
+ chan._unlink()
+ if self.active:
+ self.active = False
+ self.packetizer.close()
+ if self.completion_event != None:
+ self.completion_event.set()
+ if self.auth_handler is not None:
+ self.auth_handler.abort()
+ for event in self.channel_events.values():
+ event.set()
+ self.sock.close()
+
+
+ ### protocol stages
+
+
+ def _negotiate_keys(self, m):
+ # throws SSHException on anything unusual
+ self.clear_to_send_lock.acquire()
+ try:
+ self.clear_to_send.clear()
+ finally:
+ self.clear_to_send_lock.release()
+ if self.local_kex_init == None:
+ # remote side wants to renegotiate
+ self._send_kex_init()
+ self._parse_kex_init(m)
+ self.kex_engine.start_kex()
+
+ def _check_banner(self):
+ # this is slow, but we only have to do it once
+ for i in range(5):
+ # give them 5 seconds for the first line, then just 2 seconds each additional line
+ if i == 0:
+ timeout = 5
+ else:
+ timeout = 2
+ try:
+ buffer = self.packetizer.readline(timeout)
+ except Exception, x:
+ raise SSHException('Error reading SSH protocol banner: ' + str(x))
+ if buffer[:4] == 'SSH-':
+ break
+ self._log(DEBUG, 'Banner: ' + buffer)
+ if buffer[:4] != 'SSH-':
+ raise SSHException('Indecipherable protocol version "' + buffer + '"')
+ # save this server version string for later
+ self.remote_version = buffer
+ # pull off any attached comment
+ comment = ''
+ i = string.find(buffer, ' ')
+ if i >= 0:
+ comment = buffer[i+1:]
+ buffer = buffer[:i]
+ # parse out version string and make sure it matches
+ segs = buffer.split('-', 2)
+ if len(segs) < 3:
+ raise SSHException('Invalid SSH banner')
+ version = segs[1]
+ client = segs[2]
+ if version != '1.99' and version != '2.0':
+ raise SSHException('Incompatible version (%s instead of 2.0)' % (version,))
+ self._log(INFO, 'Connected (version %s, client %s)' % (version, client))
+
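
For reference, the identification line handled above has the form "SSH-<protoversion>-<softwareversion>[ <comment>]". A minimal standalone sketch of the same split that _check_banner performs (illustrative only, not part of the imported source):

    # illustrative: how an SSH identification string breaks apart
    def split_banner(banner):
        banner = banner.rstrip('\r\n')
        comment = ''
        i = banner.find(' ')
        if i >= 0:
            banner, comment = banner[:i], banner[i+1:]
        proto, version, client = banner.split('-', 2)
        return version, client, comment

    # split_banner('SSH-2.0-OpenSSH_3.9p1 Debian') == ('2.0', 'OpenSSH_3.9p1', 'Debian')
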
+ def _send_kex_init(self):
+ """
+ announce to the other side that we'd like to negotiate keys, and what
+ kind of key negotiation we support.
+ """
+ self.clear_to_send_lock.acquire()
+ try:
+ self.clear_to_send.clear()
+ finally:
+ self.clear_to_send_lock.release()
+ self.in_kex = True
+ if self.server_mode:
+ if (self._modulus_pack is None) and ('diffie-hellman-group-exchange-sha1' in self._preferred_kex):
+ # can't do group-exchange if we don't have a pack of potential primes
+ pkex = list(self.get_security_options().kex)
+ pkex.remove('diffie-hellman-group-exchange-sha1')
+ self.get_security_options().kex = pkex
+ available_server_keys = filter(self.server_key_dict.keys().__contains__,
+ self._preferred_keys)
+ else:
+ available_server_keys = self._preferred_keys
+
+ randpool.stir()
+ m = Message()
+ m.add_byte(chr(MSG_KEXINIT))
+ m.add_bytes(randpool.get_bytes(16))
+ m.add_list(self._preferred_kex)
+ m.add_list(available_server_keys)
+ m.add_list(self._preferred_ciphers)
+ m.add_list(self._preferred_ciphers)
+ m.add_list(self._preferred_macs)
+ m.add_list(self._preferred_macs)
+ m.add_list(self._preferred_compression)
+ m.add_list(self._preferred_compression)
+ m.add_string('')
+ m.add_string('')
+ m.add_boolean(False)
+ m.add_int(0)
+ # save a copy for later (needed to compute a hash)
+ self.local_kex_init = str(m)
+ self._send_message(m)
+
+ def _parse_kex_init(self, m):
+ cookie = m.get_bytes(16)
+ kex_algo_list = m.get_list()
+ server_key_algo_list = m.get_list()
+ client_encrypt_algo_list = m.get_list()
+ server_encrypt_algo_list = m.get_list()
+ client_mac_algo_list = m.get_list()
+ server_mac_algo_list = m.get_list()
+ client_compress_algo_list = m.get_list()
+ server_compress_algo_list = m.get_list()
+ client_lang_list = m.get_list()
+ server_lang_list = m.get_list()
+ kex_follows = m.get_boolean()
+ unused = m.get_int()
+
+ self._log(DEBUG, 'kex algos:' + str(kex_algo_list) + ' server key:' + str(server_key_algo_list) + \
+ ' client encrypt:' + str(client_encrypt_algo_list) + \
+ ' server encrypt:' + str(server_encrypt_algo_list) + \
+ ' client mac:' + str(client_mac_algo_list) + \
+ ' server mac:' + str(server_mac_algo_list) + \
+ ' client compress:' + str(client_compress_algo_list) + \
+ ' server compress:' + str(server_compress_algo_list) + \
+ ' client lang:' + str(client_lang_list) + \
+ ' server lang:' + str(server_lang_list) + \
+ ' kex follows?' + str(kex_follows))
+
+ # as a server, we pick the first item in the client's list that we support.
+ # as a client, we pick the first item in our list that the server supports.
+ if self.server_mode:
+ agreed_kex = filter(self._preferred_kex.__contains__, kex_algo_list)
+ else:
+ agreed_kex = filter(kex_algo_list.__contains__, self._preferred_kex)
+ if len(agreed_kex) == 0:
+ raise SSHException('Incompatible ssh peer (no acceptable kex algorithm)')
+ self.kex_engine = self._kex_info[agreed_kex[0]](self)
+
+ if self.server_mode:
+ available_server_keys = filter(self.server_key_dict.keys().__contains__,
+ self._preferred_keys)
+ agreed_keys = filter(available_server_keys.__contains__, server_key_algo_list)
+ else:
+ agreed_keys = filter(server_key_algo_list.__contains__, self._preferred_keys)
+ if len(agreed_keys) == 0:
+ raise SSHException('Incompatible ssh peer (no acceptable host key)')
+ self.host_key_type = agreed_keys[0]
+ if self.server_mode and (self.get_server_key() is None):
+ raise SSHException('Incompatible ssh peer (can\'t match requested host key type)')
+
+ if self.server_mode:
+ agreed_local_ciphers = filter(self._preferred_ciphers.__contains__,
+ server_encrypt_algo_list)
+ agreed_remote_ciphers = filter(self._preferred_ciphers.__contains__,
+ client_encrypt_algo_list)
+ else:
+ agreed_local_ciphers = filter(client_encrypt_algo_list.__contains__,
+ self._preferred_ciphers)
+ agreed_remote_ciphers = filter(server_encrypt_algo_list.__contains__,
+ self._preferred_ciphers)
+ if (len(agreed_local_ciphers) == 0) or (len(agreed_remote_ciphers) == 0):
+ raise SSHException('Incompatible ssh server (no acceptable ciphers)')
+ self.local_cipher = agreed_local_ciphers[0]
+ self.remote_cipher = agreed_remote_ciphers[0]
+ self._log(DEBUG, 'Ciphers agreed: local=%s, remote=%s' % (self.local_cipher, self.remote_cipher))
+
+ if self.server_mode:
+ agreed_remote_macs = filter(self._preferred_macs.__contains__, client_mac_algo_list)
+ agreed_local_macs = filter(self._preferred_macs.__contains__, server_mac_algo_list)
+ else:
+ agreed_local_macs = filter(client_mac_algo_list.__contains__, self._preferred_macs)
+ agreed_remote_macs = filter(server_mac_algo_list.__contains__, self._preferred_macs)
+ if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
+ raise SSHException('Incompatible ssh server (no acceptable macs)')
+ self.local_mac = agreed_local_macs[0]
+ self.remote_mac = agreed_remote_macs[0]
+
+ if self.server_mode:
+ agreed_remote_compression = filter(self._preferred_compression.__contains__, client_compress_algo_list)
+ agreed_local_compression = filter(self._preferred_compression.__contains__, server_compress_algo_list)
+ else:
+ agreed_local_compression = filter(client_compress_algo_list.__contains__, self._preferred_compression)
+ agreed_remote_compression = filter(server_compress_algo_list.__contains__, self._preferred_compression)
+ if (len(agreed_local_compression) == 0) or (len(agreed_remote_compression) == 0):
+ raise SSHException('Incompatible ssh server (no acceptable compression) %r %r %r' % (agreed_local_compression, agreed_remote_compression, self._preferred_compression))
+ self.local_compression = agreed_local_compression[0]
+ self.remote_compression = agreed_remote_compression[0]
+
+ self._log(DEBUG, 'using kex %s; server key type %s; cipher: local %s, remote %s; mac: local %s, remote %s; compression: local %s, remote %s' %
+ (agreed_kex[0], self.host_key_type, self.local_cipher, self.remote_cipher, self.local_mac,
+ self.remote_mac, self.local_compression, self.remote_compression))
+
+ # save for computing hash later...
+ # now wait! openssh has a bug (and others might too) where there are
+ # actually some extra bytes (one NUL byte in openssh's case) added to
+ # the end of the packet but not parsed. turns out we need to throw
+ # away those bytes because they aren't part of the hash.
+ self.remote_kex_init = chr(MSG_KEXINIT) + m.get_so_far()
+
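
The selection rule described in the comment above (as server: take the first entry of the client's list that we also support; as client: take the first entry of our list that the server offers) applies identically to kex, host keys, ciphers, MACs and compression. A small illustrative example with made-up preference lists:

    ours   = ['aes128-cbc', 'blowfish-cbc', '3des-cbc']    # our preference order
    theirs = ['3des-cbc', 'aes256-cbc', 'blowfish-cbc']    # peer's advertised order
    filter(ours.__contains__, theirs)[0]    # server mode -> '3des-cbc'
    filter(theirs.__contains__, ours)[0]    # client mode -> 'blowfish-cbc'
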
+ def _activate_inbound(self):
+ "switch on newly negotiated encryption parameters for inbound traffic"
+ block_size = self._cipher_info[self.remote_cipher]['block-size']
+ if self.server_mode:
+ IV_in = self._compute_key('A', block_size)
+ key_in = self._compute_key('C', self._cipher_info[self.remote_cipher]['key-size'])
+ else:
+ IV_in = self._compute_key('B', block_size)
+ key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size'])
+ engine = self._get_cipher(self.remote_cipher, key_in, IV_in)
+ mac_size = self._mac_info[self.remote_mac]['size']
+ mac_engine = self._mac_info[self.remote_mac]['class']
+ # initial mac keys are done in the hash's natural size (not the potentially truncated
+ # transmission size)
+ if self.server_mode:
+ mac_key = self._compute_key('E', mac_engine.digest_size)
+ else:
+ mac_key = self._compute_key('F', mac_engine.digest_size)
+ self.packetizer.set_inbound_cipher(engine, block_size, mac_engine, mac_size, mac_key)
+ compress_in = self._compression_info[self.remote_compression][1]
+ if (compress_in is not None) and ((self.remote_compression != 'zlib@openssh.com') or self.authenticated):
+ self._log(DEBUG, 'Switching on inbound compression ...')
+ self.packetizer.set_inbound_compressor(compress_in())
+
+ def _activate_outbound(self):
+ "switch on newly negotiated encryption parameters for outbound traffic"
+ m = Message()
+ m.add_byte(chr(MSG_NEWKEYS))
+ self._send_message(m)
+ block_size = self._cipher_info[self.local_cipher]['block-size']
+ if self.server_mode:
+ IV_out = self._compute_key('B', block_size)
+ key_out = self._compute_key('D', self._cipher_info[self.local_cipher]['key-size'])
+ else:
+ IV_out = self._compute_key('A', block_size)
+ key_out = self._compute_key('C', self._cipher_info[self.local_cipher]['key-size'])
+ engine = self._get_cipher(self.local_cipher, key_out, IV_out)
+ mac_size = self._mac_info[self.local_mac]['size']
+ mac_engine = self._mac_info[self.local_mac]['class']
+ # initial mac keys are done in the hash's natural size (not the potentially truncated
+ # transmission size)
+ if self.server_mode:
+ mac_key = self._compute_key('F', mac_engine.digest_size)
+ else:
+ mac_key = self._compute_key('E', mac_engine.digest_size)
+ self.packetizer.set_outbound_cipher(engine, block_size, mac_engine, mac_size, mac_key)
+ compress_out = self._compression_info[self.local_compression][0]
+ if (compress_out is not None) and ((self.local_compression != 'zlib@openssh.com') or self.authenticated):
+ self._log(DEBUG, 'Switching on outbound compression ...')
+ self.packetizer.set_outbound_compressor(compress_out())
+ if not self.packetizer.need_rekey():
+ self.in_kex = False
+ # we always expect to receive NEWKEYS now
+ self.expected_packet = MSG_NEWKEYS
+
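
The single-letter ids passed to _compute_key in the two methods above follow RFC 4253 section 7.2: 'A'/'B' are the client-to-server and server-to-client IVs, 'C'/'D' the cipher keys, and 'E'/'F' the MAC keys. Roughly, each is HASH(K || H || letter || session_id), extended with HASH(K || H || output-so-far) until enough bytes are available. A standalone sketch under those assumptions (SHA-1, raw byte-string inputs; not the module's actual _compute_key):

    from Crypto.Hash import SHA

    def derive_key_material(K_mpint, H, letter, session_id, nbytes):
        # K_mpint: the shared secret already encoded as an SSH mpint byte string
        out = sofar = SHA.new(K_mpint + H + letter + session_id).digest()
        while len(out) < nbytes:
            digest = SHA.new(K_mpint + H + sofar).digest()
            out += digest
            sofar += digest
        return out[:nbytes]
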
+ def _auth_trigger(self):
+ self.authenticated = True
+ # delayed initiation of compression
+ if self.local_compression == 'zlib@openssh.com':
+ compress_out = self._compression_info[self.local_compression][0]
+ self._log(DEBUG, 'Switching on outbound compression ...')
+ self.packetizer.set_outbound_compressor(compress_out())
+ if self.remote_compression == 'zlib@openssh.com':
+ compress_in = self._compression_info[self.remote_compression][1]
+ self._log(DEBUG, 'Switching on inbound compression ...')
+ self.packetizer.set_inbound_compressor(compress_in())
+
+ def _parse_newkeys(self, m):
+ self._log(DEBUG, 'Switch to new keys ...')
+ self._activate_inbound()
+ # can also free a bunch of stuff here
+ self.local_kex_init = self.remote_kex_init = None
+ self.K = None
+ self.kex_engine = None
+ if self.server_mode and (self.auth_handler is None):
+ # create auth handler for server mode
+ self.auth_handler = AuthHandler(self)
+ if not self.initial_kex_done:
+ # this was the first key exchange
+ self.initial_kex_done = True
+ # send an event?
+ if self.completion_event != None:
+ self.completion_event.set()
+ # it's now okay to send data again (if this was a re-key)
+ if not self.packetizer.need_rekey():
+ self.in_kex = False
+ self.clear_to_send_lock.acquire()
+ try:
+ self.clear_to_send.set()
+ finally:
+ self.clear_to_send_lock.release()
+ return
+
+ def _parse_disconnect(self, m):
+ code = m.get_int()
+ desc = m.get_string()
+ self._log(INFO, 'Disconnect (code %d): %s' % (code, desc))
+
+ def _parse_global_request(self, m):
+ kind = m.get_string()
+ self._log(DEBUG, 'Received global request "%s"' % kind)
+ want_reply = m.get_boolean()
+ ok = self.server_object.check_global_request(kind, m)
+ extra = ()
+ if type(ok) is tuple:
+ extra = ok
+ ok = True
+ if want_reply:
+ msg = Message()
+ if ok:
+ msg.add_byte(chr(MSG_REQUEST_SUCCESS))
+ msg.add(*extra)
+ else:
+ msg.add_byte(chr(MSG_REQUEST_FAILURE))
+ self._send_message(msg)
+
+ def _parse_request_success(self, m):
+ self._log(DEBUG, 'Global request successful.')
+ self.global_response = m
+ if self.completion_event is not None:
+ self.completion_event.set()
+
+ def _parse_request_failure(self, m):
+ self._log(DEBUG, 'Global request denied.')
+ self.global_response = None
+ if self.completion_event is not None:
+ self.completion_event.set()
+
+ def _parse_channel_open_success(self, m):
+ chanid = m.get_int()
+ server_chanid = m.get_int()
+ server_window_size = m.get_int()
+ server_max_packet_size = m.get_int()
+ if not self.channels.has_key(chanid):
+ self._log(WARNING, 'Success for unrequested channel! [??]')
+ return
+ self.lock.acquire()
+ try:
+ chan = self.channels[chanid]
+ chan._set_remote_channel(server_chanid, server_window_size, server_max_packet_size)
+ self._log(INFO, 'Secsh channel %d opened.' % chanid)
+ if self.channel_events.has_key(chanid):
+ self.channel_events[chanid].set()
+ del self.channel_events[chanid]
+ finally:
+ self.lock.release()
+ return
+
+ def _parse_channel_open_failure(self, m):
+ chanid = m.get_int()
+ reason = m.get_int()
+ reason_str = m.get_string()
+ lang = m.get_string()
+ if CONNECTION_FAILED_CODE.has_key(reason):
+ reason_text = CONNECTION_FAILED_CODE[reason]
+ else:
+ reason_text = '(unknown code)'
+ self._log(INFO, 'Secsh channel %d open FAILED: %s: %s' % (chanid, reason_str, reason_text))
+ try:
+ self.lock.acquire()
+ if self.channels.has_key(chanid):
+ del self.channels[chanid]
+ if self.channel_events.has_key(chanid):
+ self.channel_events[chanid].set()
+ del self.channel_events[chanid]
+ finally:
+ self.lock.release()
+ return
+
+ def _parse_channel_open(self, m):
+ kind = m.get_string()
+ chanid = m.get_int()
+ initial_window_size = m.get_int()
+ max_packet_size = m.get_int()
+ reject = False
+ if not self.server_mode:
+ self._log(DEBUG, 'Rejecting "%s" channel request from server.' % kind)
+ reject = True
+ reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
+ else:
+ self.lock.acquire()
+ try:
+ my_chanid = self.channel_counter
+ while self.channels.has_key(my_chanid):
+ self.channel_counter = (self.channel_counter + 1) & 0xffffff
+ my_chanid = self.channel_counter
+ self.channel_counter = (self.channel_counter + 1) & 0xffffff
+ finally:
+ self.lock.release()
+ reason = self.server_object.check_channel_request(kind, my_chanid)
+ if reason != OPEN_SUCCEEDED:
+ self._log(DEBUG, 'Rejecting "%s" channel request from client.' % kind)
+ reject = True
+ if reject:
+ msg = Message()
+ msg.add_byte(chr(MSG_CHANNEL_OPEN_FAILURE))
+ msg.add_int(chanid)
+ msg.add_int(reason)
+ msg.add_string('')
+ msg.add_string('en')
+ self._send_message(msg)
+ return
+ chan = Channel(my_chanid)
+ try:
+ self.lock.acquire()
+ self.channels[my_chanid] = chan
+ self.channels_seen[my_chanid] = True
+ chan._set_transport(self)
+ chan._set_window(self.window_size, self.max_packet_size)
+ chan._set_remote_channel(chanid, initial_window_size, max_packet_size)
+ finally:
+ self.lock.release()
+ m = Message()
+ m.add_byte(chr(MSG_CHANNEL_OPEN_SUCCESS))
+ m.add_int(chanid)
+ m.add_int(my_chanid)
+ m.add_int(self.window_size)
+ m.add_int(self.max_packet_size)
+ self._send_message(m)
+ self._log(INFO, 'Secsh channel %d opened.' % my_chanid)
+ try:
+ self.lock.acquire()
+ self.server_accepts.append(chan)
+ self.server_accept_cv.notify()
+ finally:
+ self.lock.release()
+
+ def _parse_debug(self, m):
+ always_display = m.get_boolean()
+ msg = m.get_string()
+ lang = m.get_string()
+ self._log(DEBUG, 'Debug msg: ' + util.safe_string(msg))
+
+ def _get_subsystem_handler(self, name):
+ try:
+ self.lock.acquire()
+ if not self.subsystem_table.has_key(name):
+ return (None, [], {})
+ return self.subsystem_table[name]
+ finally:
+ self.lock.release()
+
+ _handler_table = {
+ MSG_NEWKEYS: _parse_newkeys,
+ MSG_GLOBAL_REQUEST: _parse_global_request,
+ MSG_REQUEST_SUCCESS: _parse_request_success,
+ MSG_REQUEST_FAILURE: _parse_request_failure,
+ MSG_CHANNEL_OPEN_SUCCESS: _parse_channel_open_success,
+ MSG_CHANNEL_OPEN_FAILURE: _parse_channel_open_failure,
+ MSG_CHANNEL_OPEN: _parse_channel_open,
+ MSG_KEXINIT: _negotiate_keys,
+ }
+
+ _channel_handler_table = {
+ MSG_CHANNEL_SUCCESS: Channel._request_success,
+ MSG_CHANNEL_FAILURE: Channel._request_failed,
+ MSG_CHANNEL_DATA: Channel._feed,
+ MSG_CHANNEL_EXTENDED_DATA: Channel._feed_extended,
+ MSG_CHANNEL_WINDOW_ADJUST: Channel._window_adjust,
+ MSG_CHANNEL_REQUEST: Channel._handle_request,
+ MSG_CHANNEL_EOF: Channel._handle_eof,
+ MSG_CHANNEL_CLOSE: Channel._handle_close,
+ }
diff --git a/paramiko/util.py b/paramiko/util.py
new file mode 100644
index 0000000..abab825
--- /dev/null
+++ b/paramiko/util.py
@@ -0,0 +1,357 @@
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+"""
+Useful functions used by the rest of paramiko.
+"""
+
+from __future__ import generators
+
+import fnmatch
+import sys
+import struct
+import traceback
+import threading
+
+from paramiko.common import *
+
+
+# Change by RogerB - python < 2.3 doesn't have enumerate so we implement it
+if sys.version_info < (2,3):
+ class enumerate:
+ def __init__ (self, sequence):
+ self.sequence = sequence
+ def __iter__ (self):
+ count = 0
+ for item in self.sequence:
+ yield (count, item)
+ count += 1
+
+
+def inflate_long(s, always_positive=False):
+ "turns a normalized byte string into a long-int (adapted from Crypto.Util.number)"
+ out = 0L
+ negative = 0
+ if not always_positive and (len(s) > 0) and (ord(s[0]) >= 0x80):
+ negative = 1
+ if len(s) % 4:
+ filler = '\x00'
+ if negative:
+ filler = '\xff'
+ s = filler * (4 - len(s) % 4) + s
+ for i in range(0, len(s), 4):
+ out = (out << 32) + struct.unpack('>I', s[i:i+4])[0]
+ if negative:
+ out -= (1L << (8 * len(s)))
+ return out
+
+def deflate_long(n, add_sign_padding=True):
+ "turns a long-int into a normalized byte string (adapted from Crypto.Util.number)"
+ # after much testing, this algorithm was deemed to be the fastest
+ s = ''
+ n = long(n)
+ while (n != 0) and (n != -1):
+ s = struct.pack('>I', n & 0xffffffffL) + s
+ n = n >> 32
+ # strip off leading zeros, FFs
+ for i in enumerate(s):
+ if (n == 0) and (i[1] != '\000'):
+ break
+ if (n == -1) and (i[1] != '\xff'):
+ break
+ else:
+ # degenerate case, n was either 0 or -1
+ i = (0,)
+ if n == 0:
+ s = '\000'
+ else:
+ s = '\xff'
+ s = s[i[0]:]
+ if add_sign_padding:
+ if (n == 0) and (ord(s[0]) >= 0x80):
+ s = '\x00' + s
+ if (n == -1) and (ord(s[0]) < 0x80):
+ s = '\xff' + s
+ return s
+
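
inflate_long and deflate_long implement the SSH "mpint" convention: big-endian two's complement, with a leading zero byte added when the high bit of a positive value is set. A few worked values for reference:

    # deflate_long(255)        == '\x00\xff'   (bare '\xff' would read back as -1)
    # deflate_long(127)        == '\x7f'
    # inflate_long('\x00\xff') == 255L
    # inflate_long('\xff', always_positive=True) == 255L
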
+def format_binary_weird(data):
+ out = ''
+ for i in enumerate(data):
+ out += '%02X' % ord(i[1])
+ if i[0] % 2:
+ out += ' '
+ if i[0] % 16 == 15:
+ out += '\n'
+ return out
+
+def format_binary(data, prefix=''):
+ x = 0
+ out = []
+ while len(data) > x + 16:
+ out.append(format_binary_line(data[x:x+16]))
+ x += 16
+ if x < len(data):
+ out.append(format_binary_line(data[x:]))
+ return [prefix + x for x in out]
+
+def format_binary_line(data):
+ left = ' '.join(['%02X' % ord(c) for c in data])
+ right = ''.join([('.%c..' % c)[(ord(c)+63)//95] for c in data])
+ return '%-50s %s' % (left, right)
+
+def hexify(s):
+ "turn a string into a hex sequence"
+ return ''.join(['%02X' % ord(c) for c in s])
+
+def unhexify(s):
+ "turn a hex sequence back into a string"
+ return ''.join([chr(int(s[i:i+2], 16)) for i in range(0, len(s), 2)])
+
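
These two helpers are simple inverses of each other, e.g.:

    # hexify('OK')     == '4F4B'
    # unhexify('4F4B') == 'OK'
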
+def safe_string(s):
+ out = ''
+ for c in s:
+ if (ord(c) >= 32) and (ord(c) <= 127):
+ out += c
+ else:
+ out += '%%%02X' % ord(c)
+ return out
+
+# ''.join([['%%%02X' % ord(c), c][(ord(c) >= 32) and (ord(c) <= 127)] for c in s])
+
+def bit_length(n):
+ norm = deflate_long(n, 0)
+ hbyte = ord(norm[0])
+ bitlen = len(norm) * 8
+ while not (hbyte & 0x80):
+ hbyte <<= 1
+ bitlen -= 1
+ return bitlen
+
+def tb_strings():
+ return ''.join(traceback.format_exception(*sys.exc_info())).split('\n')
+
+def generate_key_bytes(hashclass, salt, key, nbytes):
+ """
+ Given a password, passphrase, or other human-source key, scramble it
+ through a secure hash into some keyworthy bytes. This specific algorithm
+ is used for encrypting/decrypting private key files.
+
+ @param hashclass: class from L{Crypto.Hash} that can be used as a secure
+ hashing function (like C{MD5} or C{SHA}).
+ @type hashclass: L{Crypto.Hash}
+ @param salt: data to salt the hash with.
+ @type salt: string
+ @param key: human-entered password or passphrase.
+ @type key: string
+ @param nbytes: number of bytes to generate.
+ @type nbytes: int
+ @return: key data
+ @rtype: string
+ """
+ keydata = ''
+ digest = ''
+ if len(salt) > 8:
+ salt = salt[:8]
+ while nbytes > 0:
+ hash = hashclass.new()
+ if len(digest) > 0:
+ hash.update(digest)
+ hash.update(key)
+ hash.update(salt)
+ digest = hash.digest()
+ size = min(nbytes, len(digest))
+ keydata += digest[:size]
+ nbytes -= size
+ return keydata
+
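
A usage sketch, assuming PyCrypto's MD5 and a 24-byte target (the size of a 3DES key); the salt bytes here are made up for illustration:

    from Crypto.Hash import MD5

    key = generate_key_bytes(MD5, '\x5c\x1f\x23\x09\xab\xcd\xef\x01', 'my passphrase', 24)
    assert len(key) == 24
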
+def load_host_keys(filename):
+ """
+ Read a file of known SSH host keys, in the format used by openssh, and
+ return a compound dict of C{hostname -> keytype ->} L{PKey <paramiko.pkey.PKey>}.
+ The hostname may be an IP address or DNS name. The keytype will be either
+ C{"ssh-rsa"} or C{"ssh-dss"}.
+
+ This type of file unfortunately doesn't exist on Windows, but on posix,
+ it will usually be stored in C{os.path.expanduser("~/.ssh/known_hosts")}.
+
+ @param filename: name of the file to read host keys from
+ @type filename: str
+ @return: dict of host keys, indexed by hostname and then keytype
+ @rtype: dict(hostname, dict(keytype, L{PKey <paramiko.pkey.PKey>}))
+ """
+ import base64
+ from rsakey import RSAKey
+ from dsskey import DSSKey
+
+ keys = {}
+ f = file(filename, 'r')
+ for line in f:
+ line = line.strip()
+ if (len(line) == 0) or (line[0] == '#'):
+ continue
+ keylist = line.split(' ')
+ if len(keylist) != 3:
+ continue
+ hostlist, keytype, key = keylist
+ hosts = hostlist.split(',')
+ for host in hosts:
+ if not keys.has_key(host):
+ keys[host] = {}
+ if keytype == 'ssh-rsa':
+ keys[host][keytype] = RSAKey(data=base64.decodestring(key))
+ elif keytype == 'ssh-dss':
+ keys[host][keytype] = DSSKey(data=base64.decodestring(key))
+ f.close()
+ return keys
+
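
A usage sketch (the path and hostname are illustrative):

    import os

    keys = load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
    if ('ssh.example.com' in keys) and ('ssh-rsa' in keys['ssh.example.com']):
        expected_key = keys['ssh.example.com']['ssh-rsa']
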
+def parse_ssh_config(file_obj):
+ """
+ Parse a config file of the format used by OpenSSH, and return an object
+ that can be used to make queries to L{lookup_ssh_host_config}. The
+ format is described in OpenSSH's C{ssh_config} man page. This method is
+ provided primarily as a convenience to posix users (since the OpenSSH
+ format is a de-facto standard on posix) but should work fine on Windows
+ too.
+
+ The return value is currently a list of dictionaries, each containing
+ host-specific configuration, but this is considered an implementation
+ detail and may be subject to change in later versions.
+
+ @param file_obj: a file-like object to read the config file from
+ @type file_obj: file
+ @return: opaque configuration object
+ @rtype: object
+ """
+ ret = []
+ config = { 'host': '*' }
+ ret.append(config)
+
+ for line in file_obj:
+ line = line.rstrip('\n').lstrip()
+ if (line == '') or (line[0] == '#'):
+ continue
+ if '=' in line:
+ key, value = line.split('=', 1)
+ key = key.strip().lower()
+ else:
+ # find first whitespace, and split there
+ i = 0
+ while (i < len(line)) and not line[i].isspace():
+ i += 1
+ if i == len(line):
+ raise Exception('Unparsable line: %r' % line)
+ key = line[:i].lower()
+ value = line[i:].lstrip()
+
+ if key == 'host':
+ # do we have a pre-existing host config to append to?
+ matches = [c for c in ret if c['host'] == value]
+ if len(matches) > 0:
+ config = matches[0]
+ else:
+ config = { 'host': value }
+ ret.append(config)
+ else:
+ config[key] = value
+
+ return ret
+
+def lookup_ssh_host_config(hostname, config):
+ """
+ Return a dict of config options for a given hostname. The C{config} object
+ must come from L{parse_ssh_config}.
+
+ The host-matching rules of OpenSSH's C{ssh_config} man page are used, which
+ means that all configuration options from matching host specifications are
+ merged, with more specific hostmasks taking precedence. In other words, if
+ C{"Port"} is set under C{"Host *"} and also C{"Host *.example.com"}, and
+ the lookup is for C{"ssh.example.com"}, then the port entry for
+ C{"Host *.example.com"} will win out.
+
+ The keys in the returned dict are all normalized to lowercase (look for
+ C{"port"}, not C{"Port"}. No other processing is done to the keys or
+ values.
+
+ @param hostname: the hostname to lookup
+ @type hostname: str
+ @param config: the config object to search
+ @type config: object
+ """
+ matches = [x for x in config if fnmatch.fnmatch(hostname, x['host'])]
+ # sort in order of shortest match (usually '*') to longest
+ matches.sort(key=lambda x: len(x['host']))
+ ret = {}
+ for m in matches:
+ ret.update(m)
+ del ret['host']
+ return ret
+
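
A usage sketch tying the two functions together (path and hostname are illustrative):

    import os

    config = parse_ssh_config(open(os.path.expanduser('~/.ssh/config')))
    opts = lookup_ssh_host_config('ssh.example.com', config)
    port = int(opts.get('port', 22))
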
+def mod_inverse(x, m):
+ # it's crazy how small python can make this function.
+ u1, u2, u3 = 1, 0, m
+ v1, v2, v3 = 0, 1, x
+
+ while v3 > 0:
+ q = u3 // v3
+ u1, v1 = v1, u1 - v1 * q
+ u2, v2 = v2, u2 - v2 * q
+ u3, v3 = v3, u3 - v3 * q
+ if u2 < 0:
+ u2 += m
+ return u2
+
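
This is the extended Euclidean algorithm trimmed down to return only the inverse of x modulo m; for example:

    # mod_inverse(3, 7)  == 5     (3 * 5  == 15  == 2*7  + 1)
    # mod_inverse(7, 40) == 23    (7 * 23 == 161 == 4*40 + 1)
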
+_g_thread_ids = {}
+_g_thread_counter = 0
+_g_thread_lock = threading.Lock()
+def get_thread_id():
+ global _g_thread_ids, _g_thread_counter, _g_thread_lock
+ tid = id(threading.currentThread())
+ try:
+ return _g_thread_ids[tid]
+ except KeyError:
+ _g_thread_lock.acquire()
+ try:
+ _g_thread_counter += 1
+ ret = _g_thread_ids[tid] = _g_thread_counter
+ finally:
+ _g_thread_lock.release()
+ return ret
+
+def log_to_file(filename, level=DEBUG):
+ "send paramiko logs to a logfile, if they're not already going somewhere"
+ l = logging.getLogger("paramiko")
+ if len(l.handlers) > 0:
+ return
+ l.setLevel(level)
+ f = open(filename, 'w')
+ lh = logging.StreamHandler(f)
+ lh.setFormatter(logging.Formatter('%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(_threadid)-3d %(name)s: %(message)s',
+ '%Y%m%d-%H:%M:%S'))
+ l.addHandler(lh)
+
+# make only one filter object, so it doesn't get applied more than once
+class PFilter (object):
+ def filter(self, record):
+ record._threadid = get_thread_id()
+ return True
+_pfilter = PFilter()
+
+def get_logger(name):
+ l = logging.getLogger(name)
+ l.addFilter(_pfilter)
+ return l
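
A usage sketch for the logging helpers (the filename is illustrative):

    log_to_file('/tmp/demo_paramiko.log', DEBUG)
    log = get_logger('paramiko.transport')
    log.debug('logging is set up')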