X Close
Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Cryptocurrency-converter/CryptoConverter.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ def currencies2(self, item2):
# Live data from API
def api(self, cur1, cur2):
api_link = "https://min-api.cryptocompare.com/data/pricemulti?fsyms={}&tsyms={}".format(cur1, cur2)
resp = requests.get(api_link)
resp = requests.get(api_link, timeout=60)
# print(r.status_code)
data = json.loads(resp.content)
# print(data)
Expand Down
4 changes: 2 additions & 2 deletions Current_City_Weather/Weather.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,11 +29,11 @@ def main():
city = input("City Name : ")
units_format = "&units=metric"
final_url = api_address + city + units_format
json_data = requests.get(final_url).json()
json_data = requests.get(final_url, timeout=60).json()
weather_details = get_weather_data(json_data, city)
# print formatted data
print(weather_details)



main()
main()
2 changes: 1 addition & 1 deletion Git_repo_creator/git_repo_creator.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
print(repo_description)

payload = {'name': repo_name, 'description': repo_description, 'auto_init': 'true'}
repo_request = requests.post('https://api.github.com/' + 'user/repos', auth=(user_name,github_token), data=json.dumps(payload))
repo_request = requests.post('https://api.github.com/' + 'user/repos', auth=(user_name,github_token), data=json.dumps(payload), timeout=60)
if repo_request.status_code == 422:
print("Github repo already exists try wih other name.")
elif repo_request.status_code == 201:
Expand Down
6 changes: 3 additions & 3 deletions Github_Bot/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ def __init__(self):

def get_user_details(self, args):
url = self.base_url + "users/" + args[0]
res = requests.get(url)
res = requests.get(url, timeout=60)
print('*********** USER:', args[0], '***************')
if res.status_code == 200:
data = json.loads(res.text)
Expand All @@ -28,7 +28,7 @@ def get_user_details(self, args):

def get_repo_details(self, args):
url = self.base_url + "repos/" + args[0] + "/" + args[1]
res = requests.get(url)
res = requests.get(url, timeout=60)
print('********* USER:', args[0], '| REPO:', args[1], '*********')
if res.status_code == 200:
data = json.loads(res.text)
Expand All @@ -46,4 +46,4 @@ def get_repo_details(self, args):
'repo': obj.get_repo_details,
}
function_map[args['function']](args['list'])


4 changes: 2 additions & 2 deletions InstadpShower/dppage.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,10 @@
username=input("Enter the username : ")

try:
content = requests.get("https://www.instagram.com/"+username).content
content = requests.get("https://www.instagram.com/"+username, timeout=60).content
find=re.findall(r"logging_page_id.*show_suggested_profiles",str(content))
user_id=((find[0][16:]).split(","))[0][14:-1] # We get the user id of the username
jsonreq=requests.get("https://i.instagram.com/api/v1/users/"+user_id+"/info/").content # using a link we get the whole info of the person
jsonreq=requests.get("https://i.instagram.com/api/v1/users/"+user_id+"/info/", timeout=60).content # using a link we get the whole info of the person
jsonloaded=json.loads(jsonreq)
imgurl=jsonloaded["user"]["hd_profile_pic_url_info"]["url"]
wb.open_new_tab(imgurl)
Expand Down
4 changes: 2 additions & 2 deletions Proxy-Request/proxy_request.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

def get_proxy():
url = "https://www.sslproxies.org/"
r = requests.get(url)
r = requests.get(url, timeout=60)
soup = BeautifulSoup(r.content, 'html5lib')
return {'https': choice(list(map(lambda x:x[0]+':'+x[1], list(zip(map(lambda x:x.text, soup.findAll('td')[::8]),
map(lambda x:x.text, soup.findAll('td')[1::8]))))))}
Expand All @@ -23,4 +23,4 @@ def proxy_request(request_type, url, **kwargs):


if __name__ == "__main__":
r = proxy_request('get', "https://www.youtube.com/IndianPythonista")
r = proxy_request('get', "https://www.youtube.com/IndianPythonista")
4 changes: 2 additions & 2 deletions SimpleWebpageParser/SimpleWebpageParser.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ def __init__(self, url):
self.url = url

def getHTML(self):
r = requests.get(self.url)
r = requests.get(self.url, timeout=60)
data = r.text
soup = BeautifulSoup(data,"lxml")
return soup
return soup
2 changes: 1 addition & 1 deletion Slideshare-Downloader/slideshare_downloader.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ def download_images(page_url):
for image in images:
image_url = image.get('data-full').split('?')[0]
with open(urlparse(image_url).path.split('/')[-1], "wb") as file:
response = get(image_url)
response = get(image_url, timeout=60)
file.write(response.content)
return image_dir

Expand Down
2 changes: 1 addition & 1 deletion To Do Bot/bot.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@


def get_url(url):
response = requests.get(url)
response = requests.get(url, timeout=60)
content = response.content.decode("utf8")
return content

Expand Down
6 changes: 3 additions & 3 deletions Toonify/toonify-API-1.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,6 @@
files={
'image': open(args['input'], 'rb'),
},
headers={'api-key': args['apikey']}
)
print(r.json()['output_url'])
headers={'api-key': args['apikey']},
timeout=60)
print(r.json()['output_url'])
4 changes: 2 additions & 2 deletions Toonify/toonify-API-2.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,6 @@
files={
'image': args['input'],
},
headers={'api-key': args['apikey']}
)
headers={'api-key': args['apikey']},
timeout=60)
print(r.json()['output_url'])
4 changes: 2 additions & 2 deletions Top_News/coolnews.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,8 @@
from bs4 import BeautifulSoup
import pprint

res = requests.get('https://news.ycombinator.com/news')
res2 = requests.get('https://news.ycombinator.com/news?p=2')
res = requests.get('https://news.ycombinator.com/news', timeout=60)
res2 = requests.get('https://news.ycombinator.com/news?p=2', timeout=60)
soup = BeautifulSoup(res.text, 'html.parser')
soup2 = BeautifulSoup(res2.text, 'html.parser')

Expand Down
4 changes: 2 additions & 2 deletions codeforcesChecker/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ def getOutputFiles(output): #function to get sample outputs
currentDirectory=os.getcwd()
toadd='Problem'+elements[i].text
print("Getting essentials for Problem"+elements[i].text)
res=requests.get(url+'/problem/'+elements[i].text)
res=requests.get(url+'/problem/'+elements[i].text, timeout=60)
os.makedirs(os.path.join(currentDirectory,toadd)) #make directory for individual problems
os.chdir(os.path.join(currentDirectory,toadd))
soup=bs4.BeautifulSoup(res.text,'html.parser') #using beautiful soup to parse HTML
Expand All @@ -111,7 +111,7 @@ def getOutputFiles(output): #function to get sample outputs
problemFolder='Problem '+url[len(url)-6:len(url)-2]+url[len(url)-1:]
os.makedirs(os.path.join(os.getcwd(),problemFolder))
os.chdir(os.path.join(os.getcwd(),problemFolder))
res=requests.get(url)
res=requests.get(url, timeout=60)
soup=bs4.BeautifulSoup(res.text,'html.parser') #using beautiful soup to parse HTML
input=soup.find_all('div',{'class':'input'})
output=soup.find_all('div',{'class':'output'})
Expand Down
2 changes: 1 addition & 1 deletion covid_visualiser/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import requests
import json

r = requests.get(url='https://api.covid19india.org/data.json')
r = requests.get(url='https://api.covid19india.org/data.json', timeout=60)
statewise_covid_data = json.loads(r.content)['statewise']

with open('capital_data.json', 'r') as f:
Expand Down
4 changes: 2 additions & 2 deletions elastic-snapshot/elastic-snapshot.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def get_snapshot_listing(options):
return url
else:
#do the real stuff
r = requests.get(url)
r = requests.get(url, timeout=60)
if r.status_code == 200:
return r.text
else:
Expand Down Expand Up @@ -82,7 +82,7 @@ def create_snapshot(options):
if sleep_count > 20:
print("Snapshot still running, exceeded 10 sleep cycles")
exit(2)
snap_result = requests.put(url, json=payload)
snap_result = requests.put(url, json=payload, timeout=60)
if snap_result.status_code == 200 and options.verbose:
print("Requested: {}".format(snap_result.json()))
else:
Expand Down
4 changes: 2 additions & 2 deletions imdb_episode_ratings/scraper.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
def get_static_html ( search_url ) :
## create the soup object for the page
try:
r_page = requests.get ( search_url )
r_page = requests.get ( search_url, timeout=60)
except:
print("Connection refused by the server..")
time.sleep(5)
Expand Down Expand Up @@ -104,4 +104,4 @@ def start() :
print ( " Finished ")
result_file.save( show_name.replace('+' , '_') + '.xls')

start()
start()
76 changes: 38 additions & 38 deletions insta_video_downloader/instavideo.py
Original file line number Diff line number Diff line change
@@ -1,38 +1,38 @@
import urllib.request
import requests
import json
import re
import sys
def download(post_id):
multiple_posts = False
videos = []
data = requests.get("https://instagram.com/p/{}".format(post_id))
if data.status_code == 404:
print("Specified post not found")
sys.exit()
json_data = json.loads(re.findall(r'window._sharedData\s=\s(\{.*\});</script>', data.text)[0])
data = json_data['entry_data']['PostPage'][0]['graphql']['shortcode_media']
if 'edge_sidecar_to_children' in data.keys():
multiple_posts = True
caption = data['edge_media_to_caption']['edges'][0]['node']['text']
media_url = data['display_resources'][2]['src']
is_video = data['is_video']
if not is_video and not multiple_posts:
print("No Videos found")
sys.exit()
if is_video:
videos.append(data['video_url'])
if multiple_posts:
for post in data['edge_sidecar_to_children']['edges']:
if post['node']['is_video']:
videos.append(post['node']['video_url'])
print("Found total {} videos".format(len(videos)))
for no, video in zip(list(range(len(videos))), videos):
print("Downloading video {}".format(no))
urllib.request.urlretrieve(video, "{}_{}.mp4".format(post_id, no))
if len(sys.argv) == 1:
print("Please provide instagram post id")
else:
download(sys.argv[1])
import urllib.request
import requests
import json
import re
import sys

def download(post_id):
    """Download every video attached to the Instagram post `post_id`.

    Fetches the post page, extracts the embedded `window._sharedData`
    JSON, collects the video URL(s) — including each entry of a
    multi-media ("sidecar") post — and saves them to the current
    directory as `<post_id>_<index>.mp4`.

    Exits the process (via sys.exit) when the post does not exist or
    the page no longer embeds the expected JSON payload.
    """
    multiple_posts = False
    videos = []
    data = requests.get("https://instagram.com/p/{}".format(post_id), timeout=60)
    if data.status_code == 404:
        print("Specified post not found")
        sys.exit()
    # Instagram embeds the post metadata as a JSON blob in a script tag;
    # guard against markup changes instead of crashing with IndexError.
    matches = re.findall(r'window._sharedData\s=\s(\{.*\});</script>', data.text)
    if not matches:
        print("Specified post not found")
        sys.exit()
    json_data = json.loads(matches[0])
    data = json_data['entry_data']['PostPage'][0]['graphql']['shortcode_media']
    if 'edge_sidecar_to_children' in data.keys():
        multiple_posts = True
    # NOTE: the original also read the caption and display_resources here,
    # but never used them — and the caption lookup crashed (IndexError) on
    # caption-less posts, so both extractions are dropped.
    is_video = data['is_video']
    if not is_video and not multiple_posts:
        print("No Videos found")
        sys.exit()
    if is_video:
        videos.append(data['video_url'])
    if multiple_posts:
        # A sidecar post mixes images and videos; keep only the videos.
        for post in data['edge_sidecar_to_children']['edges']:
            if post['node']['is_video']:
                videos.append(post['node']['video_url'])
    print("Found total {} videos".format(len(videos)))
    for no, video in zip(list(range(len(videos))), videos):
        print("Downloading video {}".format(no))
        urllib.request.urlretrieve(video, "{}_{}.mp4".format(post_id, no))

# CLI entry point: the first positional argument is the Instagram post id.
if len(sys.argv) < 2:
    print("Please provide instagram post id")
else:
    download(sys.argv[1])
6 changes: 3 additions & 3 deletions medium_article_downloader/helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def get_topic():
def extract_links(url):
'''Extract article links from url'''

html_response = requests.get(url)
html_response = requests.get(url, timeout=60)
parsed_response = bs4.BeautifulSoup(html_response.text, features='html5lib')
article_list = parsed_response.select('h3 > a')
return article_list
Expand All @@ -32,12 +32,12 @@ def extract_links(url):
def medium_text(url):
'''Extract text from a medium article link.'''

html_response = requests.get(url)
html_response = requests.get(url, timeout=60)
parsed_response = bs4.BeautifulSoup(html_response.text, features='html5lib')
tag_list = parsed_response.find_all(['h1', 'p', 'h2'])

extracted_text = ''
for j in range(len(tag_list)):
extracted_text += tag_list[j].getText() + '\n\n'

return extracted_text
return extracted_text
2 changes: 1 addition & 1 deletion url_shortener/url_shortener.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def request_short_url(self):
prarams = {API_PARAM: self.__long_url}

try:
result = requests.post(API_URL, data = prarams)
result = requests.post(API_URL, data = prarams, timeout=60)
except ConnectionError as err:
return -1, err

Expand Down
X Close