CrazyPython
12/7/2012 - 11:09 PM

Useful library of snippets (help improve)



"version .1 supports __getitem__ and __setitem__ and fill and fillrect"
"version .2 supports __str__ and __dict__"
"""
version .3  
full iteriator support
__len__ and __contains__
added documentation
__setitem__  and __setitem__supports slices with support for versions below 2.0(very unlikley)
"""
import sys, threading, time
class i:
    "Simple forward iterator over any indexable sequence l."
    def __init__(self, l):
        self.pos = 0
        self.l = l
    def __iter__(self):
        return self
    def next(self):
        # Raise StopIteration once every item has been returned.
        if self.pos == len(self.l):
            raise StopIteration()
        value = self.l[self.pos]
        self.pos += 1
        return value
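# Hypothetical usage sketch: _example_i just shows how the i iterator class
# above can be driven by a for loop.
def _example_i():
    for item in i(["a", "b", "c"]):
        print item   # prints a, b, c in order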
        
class grid:
    """
    mutable object
    deletion not supported(just wacky)
    no order(wacky and uses a dictionary)
    slicing is possible, ignores step
    """
    def _check(self, var):
        if len(var) != 4:
            raise TypeError("size needs to be a length of 4, not " + str(len(var)) + ".")
        for item in var:
            if type(item) != int:
                raise TypeError("Every item in size must be an integer")
    def __init__(self,size,default = None,large=False):
        "__init__([lowx,lowy,highx,highy],default=None) --> grid object"
        "For large inputs, use large = True so you can show a loading status"
        self.is_init = True
        if large:
            thread = threading.Thread(target = self.__init__,args = (size,default))
            thread.start()
        else:
            self._check(size)
            self.lowx = size[0]
            self.lowy = size[1]
            self.highx = size[2]+1
            self.highy = size[3]+1
            self.g = {}
            for x in range(self.lowx,self.highx):
                for y in range(self.lowy,self.highy):
                    self.g[x,y] = default
            self.is_init = False
    def __getitem__(self, arg):
        if type(arg) == slice:
            return self._ghelper2(arg.start, arg.stop)
        else:
            return self._ghelper(arg)
    def _ghelper(self, arg):
        return self.g[arg]
    def _ghelper2(self, start, stop):
        # Collect every cell in the rectangle from start to stop (step ignored).
        ret = []
        for x in range(start[0], stop[0]):
            for y in range(start[1], stop[1]):
                ret.append(self.g[x, y])
        return ret
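# Hypothetical usage sketch: _example_grid shows how the grid class above is
# built and read (only construction, single-cell lookups and slices are shown).
def _example_grid():
    g = grid([0, 0, 2, 2], default=0)   # cells (0,0) through (2,2), all set to 0
    print g[0, 0]                        # single-cell lookup -> 0
    print g[(0, 0):(2, 2)]               # rectangular slice; the step is ignored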
def find_key(symbol_dic, val):
    "Return the first key in symbol_dic whose value equals val (IndexError if none)."
    return [k for k, v in symbol_dic.iteritems() if v == val][0]
def striplist(l):
    "Strip whitespace from every string in l (great with readlines)."
    return [x.strip() for x in l]
def splitlist(l, k=' '):
    "Split every string in l on k (a space by default)."
    return [x.split(k) for x in l]
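# Hypothetical usage sketch ("data.txt" is a made-up file name): cleaning and
# splitting the lines returned by readlines() with the two helpers above.
def _example_striplist():
    lines = striplist(open("data.txt").readlines())  # drop newlines and spaces
    rows = splitlist(lines, ',')                     # split each line on commas
    print rows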
import urllib
from HTMLParser import HTMLParser
from re import sub
from sys import stderr
from traceback import print_exc
def getsite(string="",error="cannot find website",recerror=""):
    website = raw_input(string)
    try:site = urllib.urlopen(website)
    except:
        try:urllib.urlopen("http://"+website)
        except:
            try:site = urllib.urlopen("https://"+website)
            except:
                website = "www." + website
                try:site = urllib.urlopen(website)
                except:
                    try:site =  urllib.urlopen("http://"+website)
                    except:
                        try:site = urllib.urlopen("https://"+website)
                        except:
                            print error
                            try:return getsite()
                            except RuntimeError:
                                if recerror != "": 
                                    print recerror
                            except:
                                print"Error asking again"
                                time.sleep(1)
    return site
def pushinput(string="",error="cannot find website",recerror=""):
    ret = None
    while ret == None:
        ret = getsite(string,error,recerror)
    return ret
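# Hypothetical usage sketch of the interactive flow above (it prompts for a URL
# and needs network access).
def _example_pushinput():
    site = pushinput("Enter a URL: ")
    print site.read()[:200]   # first 200 characters of the fetched page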

class _DeHTMLParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.__text = []

    def handle_data(self, data):
        text = data.strip()
        if len(text) > 0:
            text = sub('[ \t\r\n]+', ' ', text)
            self.__text.append(text + ' ')

    def handle_starttag(self, tag, attrs):
        if tag == 'p':
            self.__text.append('\n\n')
        elif tag == 'br':
            self.__text.append('\n')

    def handle_startendtag(self, tag, attrs):
        if tag == 'br':
            self.__text.append('\n\n')

    def text(self):
        return ''.join(self.__text).strip()


def dehtml(text):
    try:
        parser = _DeHTMLParser()
        parser.feed(text)
        parser.close()
        return parser.text()
    except:
        print_exc(file=stderr)
        return text
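# Hypothetical usage sketch: _example_dehtml runs dehtml() on a small fragment
# of markup.
def _example_dehtml():
    print dehtml("<p>Hello<br/>world</p>")   # prints the text without the tags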
def get_link(page):
    "page -> (url, end): the first href target in page and the index just past it"
    start_link = page.find('<a href=')
    start_quote = start_link + len('<a href=') + 1   # skip the opening quote
    end_quote = page.find('"', start_quote)          # closing double quote of the URL
    url = page[start_quote:end_quote]
    return url, end_quote
def get_links(page):
    "Return every href target found in page, in order."
    links = []
    while '<a href=' in page:
        url, end = get_link(page)
        links.append(url)
        page = page[end:]
    return links
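# Hypothetical usage sketch tying the pieces together (prompts for a URL and
# needs network access): fetch a page and list the href targets found on it.
def _example_get_links():
    page = pushinput("Enter a URL: ").read()
    for url in get_links(page):
        print url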