main.py
#!/usr/bin/env python3
import json
import logging
import random

import facebook
import requests
from azure.cognitiveservices.search.imagesearch import ImageSearchAPI
from azure.cognitiveservices.search.imagesearch.models import SafeSearch
from msrest.authentication import CognitiveServicesCredentials

MAX_RETRIES = 4
# Language names that separate() pads with spaces in the etymology text.
SEP_WORDS = ['German', 'Latin', 'English', 'Greek', 'Germanic', 'French', 'Dutch']
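
# Added note: logging is used throughout but never configured in the original
# script, so INFO messages would be dropped under the default root level.
# basicConfig is a no-op if the hosting runtime has already installed handlers,
# so this only affects local runs.
logging.basicConfig(level=logging.INFO)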


# Returns a random word (str) from the processed word list.
def _get_word(filename="OED_processed.txt"):
    with open(filename, "r") as f:
        dictionary = f.readlines()
    # random.choice avoids the off-by-one of randint(0, len(dictionary)).
    word = random.choice(dictionary).strip("\n")
    logging.info("got word {}".format(word))
    return word


def get_word():
    # Re-draw until the word is usable: non-empty, at least three characters,
    # and not a prefix/suffix entry ending in a hyphen.
    word = _get_word()
    while not word or len(word) < 3 or word[-1] == '-':
        logging.warning("couldn't find a usable word, trying again")
        word = _get_word()
    return word


# Returns the parsed JSON response for a word from the Oxford Dictionaries API.
def etym_fetch(word_id, api_id, api_key, language="en-gb"):
    url = "https://od-api.oxforddictionaries.com:443/api/v2/entries/" + language + "/" + word_id.lower()
    r = requests.get(url, headers={"app_id": api_id, "app_key": api_key})
    return r.json()


def get_image(word, api_key, endpoint):
    # Search Bing Image Search for the word and return the raw bytes of the
    # first safe-search-filtered result.
    client = ImageSearchAPI(CognitiveServicesCredentials(api_key), base_url=endpoint)
    data = client.images.search(
        query=word,
        safe_search=SafeSearch.strict
    )
    if not data.value:
        logging.warning("couldn't find image")
        raise ValueError("couldn't find image")
    first_image = data.value[0]
    logging.info("got image url {}".format(first_image.content_url))
    img = requests.get(first_image.content_url).content
    if not img:
        logging.warning("couldn't download image")
        raise ValueError("couldn't download image")
    return img


def get_etymology_and_definition_text(etym):
    # Walk the API response and return the definitions and etymology of the
    # first entry that actually has an etymology.
    for result in etym['results']:
        for l_entry in result['lexicalEntries']:
            for entry in l_entry['entries']:
                definition_text = ''
                try:
                    for sense in entry['senses']:
                        for definition in sense['definitions']:
                            definition_text += '{}\n'.format(definition)
                except KeyError:
                    pass
                etym_text = ''
                try:
                    for etymology in entry['etymologies']:
                        etym_text += '{}\n'.format(etymology)
                except KeyError:
                    continue
                if etym_text:
                    return {
                        'definition': definition_text,
                        'etymology': etym_text
                    }
    raise ValueError("couldn't find etymology")


def separate(etym_text):
    # Pad language names with spaces (presumably to fix etymology strings
    # where they run into the surrounding text).
    for sep in SEP_WORDS:
        if sep in etym_text:
            etym_text = etym_text.replace(sep, ' {} '.format(sep))
    return etym_text


def post_etym(data, context):
    with open("creds.json", "r") as f:
        creds = json.load(f)
    graph = facebook.GraphAPI(access_token=creds['facebook']['token'])
    success = False
    # Retry with a fresh word until a post succeeds or MAX_RETRIES is reached.
    for i in range(MAX_RETRIES):
        word = get_word()
        logging.info("got word {}".format(word))
        try:
            try:
                img = get_image(word, creds['azure']['key'], creds['azure']['endpoint'])
            except Exception:
                # Fall back to a placeholder image if search or download fails.
                with open('404.JPG', 'rb') as f:
                    img = f.read()
            etym = etym_fetch(word, creds['oed']['id'], creds['oed']['key'])
            text_dict = get_etymology_and_definition_text(etym)
            text = '{}\nDefinition: {}\nEtymology: {}'.format(
                word,
                text_dict['definition'],
                separate(text_dict['etymology'])
            )
            graph.put_photo(image=img, message=text)
            success = True
            break
        except Exception as e:
            logging.error(e)
            continue
    if not success:
        logging.fatal("couldn't successfully send")
    return "Success"
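

# Added for local testing only (an assumption, not part of the original file):
# post_etym's (data, context) parameters look like a Cloud Functions-style
# background-trigger signature and are unused in the body, so passing None for
# both exercises the same code path from the command line.
if __name__ == "__main__":
    post_etym(None, None)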