#
# summary.py
#
# This module prints (displays to the user) various summary
# statistics computed using the functions defined in stats.py
#

# imports #
from __future__ import print_function

import json
import os   # needed for the os.path.exists() check below

import requests

# only these two stats.py helpers are used in this module
from stats import report_summary, report_json_summary

# functions #
def print_summary(v_counts, c_counts, w_counts, mood, verbosity=0):
    '''
    Formats and prints the summary stats for a given piece
    of text, based on arguments which can be obtained via a
    call to the report_summary function.

    verbosity level == 0 : display word counts...
    verbosity level == 1 : ...and display mood for text...
    verbosity level >= 2 : ...and display character counts
    '''
    if verbosity >= 2:
        # render output of analysis to the terminal
        # (sum the per-character counts to get the totals)
        print('This text contains {} vowels'.format(sum(v_counts.values())))
        print('This text contains {} consonants'.format(sum(c_counts.values())), end='\n\n')

    # merge the vowel and consonant counts into one character listing
    # (copy first so the caller's v_counts dict is not mutated)
    char_counts = dict(v_counts)
    char_counts.update(c_counts)

    # values for clean output display
    max_count = max(char_counts.values())
    fmt_str = '{} : {:' + str(len(str(max_count)) + 1) + 'd}'
    if verbosity >= 2:
        # each character with its count (sorted in alphabetical order)
        for ch, count in sorted(char_counts.items()):
            print(fmt_str.format(ch, count))
        print()

    # values for clean display of output
    max_count = max(w_counts.values())
    longest_word = max(w_counts.keys(), key=len)
    fmt_str = '{:' + str(len(longest_word) + 1) + 's} : {:' + str(len(str(max_count)) + 1) + '}'
    if verbosity >= 0:
        # each word with its count (sorted in descending order by frequency)
        for word, count in sorted(w_counts.items(), key=lambda wc: wc[1], reverse=True):
            print(fmt_str.format(word, count))
        print()

    if verbosity >= 1:
        # output the predicted "mood" for the text
        print('This text has a "{}" mood'.format(mood), end='\n\n')
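
# Example usage (a hedged sketch -- the sample string is arbitrary; report_summary()
# does return the (vowel_counts, consonant_counts, word_counts, mood) tuple that
# print_summary() expects, as seen in print_summary_from_string() below):
#
#   v, c, w, m = report_summary('the quick brown fox jumps over the lazy dog')
#   print_summary(v, c, w, m, verbosity=2)   # word counts, mood, and per-character detail
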
def print_summary_from_string(s, verbosity=0, json_mode=False):
    '''
    Prints the summary statistics given
    only the input string itself
    '''
    # JSON output display mode
    if json_mode:
        info_dict = report_json_summary(s)
        info_dict['name'] = s
        print(json.dumps(info_dict), end='\n\n')
    else:
        v, c, w, m = report_summary(s)
        print('Summary for string : "{}"'.format(s), end='\n\n')
        print_summary(v, c, w, m, verbosity)
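
# Example usage (a hedged sketch -- the sample string is arbitrary):
#
#   print_summary_from_string('to be or not to be', verbosity=1)
#   print_summary_from_string('to be or not to be', json_mode=True)   # one JSON object on stdout
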
def print_summary_from_file(filename, verbosity=0, json_mode=False):
    '''
    Prints the summary statistics
    for the file located at <filename>

    NOTE - this function is very memory-intensive
    since it loads the entire file into memory
    at once, so use it with caution
    '''
    # JSON output display mode
    if json_mode:
        with open(filename) as f:
            info_dict = report_json_summary(f.read())
        info_dict['name'] = filename
        print(json.dumps(info_dict), end='\n\n')
    else:
        print('Summary for file : "{}"'.format(filename), end='\n\n')
        if not os.path.exists(filename):
            print('Cannot find file : {}'.format(filename))
        else:
            with open(filename) as f:
                s = f.read()
            print_summary_from_string(s, verbosity)
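
# Example usage (a hedged sketch -- 'sample.txt' is a hypothetical file path):
#
#   print_summary_from_file('sample.txt', verbosity=1)
#   print_summary_from_file('sample.txt', json_mode=True)
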
def print_summary_from_url(url, verbosity=0, json_mode=False):
    '''
    Prints the summary statistics
    obtained after analyzing the text
    located at the provided url
    '''
    # JSON output display mode
    if json_mode:
        r = requests.get(url)
        # use r.text (decoded unicode) rather than r.content (raw bytes)
        info_dict = report_json_summary(r.text)
        info_dict['name'] = url
        print(json.dumps(info_dict), end='\n\n')
    else:
        print('Summary for text at : "{}"'.format(url), end='\n\n')
        try:
            r = requests.get(url)
        except requests.exceptions.ConnectionError:
            print('Invalid url: {}'.format(url))
        else:
            print_summary_from_string(r.text, verbosity)
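
# Minimal self-test sketch (an assumption -- this module may instead be driven by a
# separate command-line script). Running `python summary.py` directly would then
# print a plain summary followed by a JSON summary for a short sample string.
if __name__ == '__main__':
    sample = 'the quick brown fox jumps over the lazy dog'
    print_summary_from_string(sample, verbosity=2)
    print_summary_from_string(sample, json_mode=True)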