Greetings,<br><br>I have a problem I hope someone here can assist with. I'm using TwistedSNMP to query a bunch of SNMP devices asynchronously. The behavior I'm trying to get is this: query all the devices via SNMP, where each query returns a deferred, and when all their callbacks have fired, stop the reactor and thereby exit the program. I thought perhaps I could put each of these SNMP deferreds in a DeferredList and add a callback to the DeferredList that would stop the reactor, but it does not do that. Enclosed is a code sample. Am I doing something incorrectly, or should I do something different?<br>
<br>Thanks! -Don<br><br>"""Trivial example to retrieve an OID from a remote Agent"""<br>from twisted.internet import reactor<br>from twistedsnmp import snmpprotocol, agentproxy<br>from twisted.enterprise import adbapi<br>
from twisted.internet import defer
import os

APPNAME = 'ClearSNMP'

# Results accumulated by tasmanResults(), keyed by device name.
d_results = {}
# Fallback device name, used until the sysName OID is read for a device.
device_name = 'Unknown'

# Database connection settings for the ClearSNMP MSSQL database.
# NOTE(review): credentials are hard-coded in source; consider loading
# them from a config file or the environment instead.
db_conn = {
    'user': 'sa',
    'password': 'password',
    'host': 'localhost',
    'database': 'ClearSNMP',
}

# SNMP community string used for every query.
community_string = 'public'

# Deferreds for the in-flight SNMP requests; grouped into a DeferredList
# so the reactor can be stopped once all of them have fired.
deferred_list = []

# Unbuffered CSV output file (one row per link per device).
outfile = open('outfile.csv', 'wb', 0)
# Write the column headers.  Bug fix: the original header had no trailing
# newline, so the first data row was appended onto the header line.
outfile.write("device_name,link_oid,link_name,link_capacity_oid,link_capacity,traffic_in_oid,traffic_in,traffic_out_oid,traffic_out\r\n")

# Create the database connection pool used by getNetworkElements().
dbpool = adbapi.ConnectionPool("pymssql", user=db_conn['user'], password=db_conn['password'], host=db_conn['host'], database=db_conn['database'])
<br> <br>def main( class_handler, proxy, oids ):<br> """Do a getTable on proxy for OIDs and store in oidStore"""<br> df = proxy.getTable(<br> oids, timeout=.25, retryCount=5<br>
)<br> if class_handler == 'tasman':<br> df.addCallback( tasmanResults )<br> else:<br> df.addCallback( results )<br> #df.addCallback( exiter )<br> df.addErrback( errorReporter, proxy )<br>
#df.addErrback( exiter )<br> return df<br><br><br>def tasmanResults( result ):<br> <br> """Results 'appear' to be a nested dictionary, but it is really an object of OIDs. I figured out how to get to the OIDs by<br>
casting them as a dictionary using the built_in dict() function. Now I can iterate over all the OIDs."""<br> #print 'Results:'<br> d_table_key = {}<br> for table_key in result.keys(): <br>
#get the device name. for some reason i have to do this in a loop as just saying dict(result[table_key])['.1.3.6.1.2.1.1.5.0'] doesn't work<br> for oid in dict(result[table_key]).keys(): <br>
if oid=='.1.3.6.1.2.1.1.5.0':<br> device_name=str(dict(result[table_key])[oid])<br> <br> d_oid = {}<br> for oid in dict(result[table_key]).keys(): <br>
d_oid[str(oid)]=dict(result[table_key])[oid]<br> d_table_key[str(table_key)] = d_oid<br> d_results[device_name]=d_table_key<br> <br> #specify the table oids so we can match them appropriately later<br>
link_name_table = ".1.3.6.1.2.1.2.2.1.2"<br> link_capacity_table = ".1.3.6.1.2.1.2.2.1.5"<br> traffic_in_table = ".1.3.6.1.2.1.2.2.1.10"<br> traffic_out_table = ".1.3.6.1.2.1.2.2.1.16"<br>
<br> # For each link name in the table I need to get the values from the link_capacity, traffic_in and traffic_out tables and put them in the same line<br> <br> for i in d_results.keys():<br> d_row = {} #holds the column values for a row<br>
<br> #set device_name in Row<br> d_row['device_name'] = device_name<br> <br> for k in d_results[i][link_name_table]:<br> #set link_oid and link_name in Row<br> d_row['link_oid'] = k<br>
d_row['link_name'] = d_results[i][link_name_table][d_row['link_oid']]<br> <br> #lookup the capacity metric for this link_oid<br> #create the oid to lookup<br> d_row['link_capacity_oid'] = d_row['link_oid'].replace(link_name_table,link_capacity_table)<br>
d_row['link_capacity'] = d_results[i][link_capacity_table][d_row['link_capacity_oid']]<br> <br> #lookup the traffic_in metric for this link_oid<br> #create the oid to lookup<br>
d_row['traffic_in_oid'] = d_row['link_oid'].replace(link_name_table,traffic_in_table)<br> d_row['traffic_in'] = d_results[i][traffic_in_table][d_row['traffic_in_oid']]<br>
<br> #lookup the traffic_out metric for this link_oid<br> #create the oid to lookup<br> d_row['traffic_out_oid'] = d_row['link_oid'].replace(link_name_table,traffic_out_table)<br>
d_row['traffic_out'] = d_results[i][traffic_out_table][d_row['traffic_out_oid']]<br> <br> #Calculate Utilization - if we can<br><br><br> #print d_row<br> out = d_row['device_name']+","+d_row['link_oid']+","+d_row['link_name']+","+d_row['link_capacity_oid']+","+str(d_row['link_capacity'])+","+d_row['traffic_in_oid']+","+str(d_row['traffic_in'])+","+d_row['traffic_out_oid']+","+str(d_row['traffic_out'])+'\r\n'<br>
#print out<br> outfile.write(out)<br> return result<br><br><br>def errorReporter( err, proxy ):<br> #print 'ERROR', err.getTraceback()<br> #log the failed snmp query attempt<br> print 'Failed to retrieve SNMP counters from agent:',proxy<br>
return err<br><br>def exiter( value ):<br> <br> reactor.stop()<br> outfile.close()<br> <br> return value<br><br><br>def getNetworkElements():<br> return dbpool.runQuery("select top 10 ip, mkt, dns_name, dns_fqdn from dns where dns_type='TASMAN'")<br>
<br>def printResult(l):<br> for item in l:<br> print "Fetching counters for "+item[2]<br> #deferred_list.append(snmpSetup(item[0], 161, 'ctipublic','tasman'))<br> ipAddress = item[0]<br>
portno = 161<br> community = community_string<br> class_handler = 'tasman'<br> print ipAddress,portno<br> # choose random port in range 25000 to 30000<br> port = snmpprotocol.port()<br>
targetPort = int(portno)<br> proxy = agentproxy.AgentProxy(ipAddress, <br> targetPort, <br> community = community,<br> snmpVersion = 'v1',<br> protocol = port.protocol,<br>
)<br><br><br> d_oids = {'.1.3.6.1.2.1.1':"System Tables",<br> '.1.3.6.1.2.1.2.2.1.2':"Circuit Name",<br> '.1.3.6.1.2.1.2.2.1.5':"Capacity",<br>
'.1.3.6.1.2.1.2.2.1.10':"Traffic In",<br> '.1.3.6.1.2.1.2.2.1.16':"Traffic Out"<br> }<br><br> """Do a getTable on proxy for OIDs and store in oidStore"""<br>
df = proxy.getTable(<br> d_oids, timeout=.25, retryCount=5<br> )<br> if class_handler == 'tasman':<br> df.addCallback( tasmanResults )<br> else:<br> df.addCallback( results )<br> df.addErrback( errorReporter, proxy )<br>
deferred_list.append(df)<br> return <br><br><br><br>if __name__ == "__main__":<br> import sys<br> #start the log service<br> from twisted.python import log<br> from twisted.python import logfile<br>
# rotate every 100000000 bytes<br> f = logfile.LogFile(APPNAME+str(os.getpid())+".log", "Logs", rotateLength=100000000)<br> # setup logging to use our new logfile<br> #log.startLogging(f)<br>
<br> <br> g = getNetworkElements().addCallback(printResult)<br> dl = defer.DeferredList(deferred_list, 0, 0, 1 )<br> print dir(dl)<br> dl.addCallback(exiter)<br> <br> reactor.run()<br>