forked from theyogicoderRI/PythonMadeEasy
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathread_write_csv.py
More file actions
92 lines (73 loc) · 2.99 KB
/
read_write_csv.py
File metadata and controls
92 lines (73 loc) · 2.99 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
#read from one csv and write to another
import csv
from datetime import date
# Capture today's date once so every artifact produced in this run shares it.
now = date.today()

def get_file():
    """Build the dated output filename for today's gap report (gaps_YYYY-MM-DD.csv)."""
    return f"gaps_{now}.csv"

# Module-level name of the file the rest of the script writes to and reads back.
name = get_file()
def create_file(name, source='mydata/daily_file_2019-01-30.csv', report_date=None):
    """Copy the High-criticality rows from *source* into a new CSV called *name*.

    The output gets a fixed header and each copied row is tagged with a
    "GAP#" of the form <report_date>-<source line number>.

    Args:
        name: path of the CSV file to create (overwritten if it exists).
        source: path of the daily input CSV to read; defaults to the
            previously hard-coded daily file so existing callers are unchanged.
        report_date: date used in the GAP# prefix and the confirmation
            message; defaults to the module-level ``now``.

    Returns:
        A confirmation string naming the created file and the report date.
    """
    if report_date is None:
        report_date = now  # module-level "today", set at import time
    with open(name, 'w') as create:
        fieldnames = ["GAP#", "ID", "LOB", "Category", "TargetDate", "Criticality", "% Complete"]
        writer = csv.DictWriter(create, fieldnames=fieldnames, lineterminator="\n")  # keeps from skipping line
        writer.writeheader()
        with open(source, "r") as daily:
            line_no = 1  # source row number; non-High rows still advance it, so GAP#s can skip
            for row in csv.reader(daily):
                gap_id = str(report_date) + "-" + str(line_no)
                if row[5] == "High":  # only High-criticality rows are copied
                    writer.writerow({"GAP#": gap_id, "ID": row[1], "LOB": row[2], "Category": row[3],
                                     "TargetDate": row[4], "Criticality": row[5], "% Complete": row[6]})
                line_no += 1
    print("*" * 50)
    # "succesfully" typo in the user-facing message fixed here.
    return "'" + str(name) + "'" + " has been successfully created! ON DATE: " + str(report_date)
print(create_file(name) )
def get_avg(field_name, filename=None):
    """Average the integer values of *field_name* across the generated CSV.

    Args:
        field_name: column name to average (values must parse as int).
        filename: CSV file to read; defaults to the module-level ``name``
            (today's gaps file) so existing callers are unchanged.

    Returns:
        A summary string containing the computed average.

    Raises:
        ValueError: if the file contains no data rows (avoids a bare
            ZeroDivisionError).
    """
    if filename is None:
        filename = name  # module-level daily output file
    total = 0
    count = 0
    with open(filename, "r") as output:
        for row in csv.DictReader(output):
            # NOTE: the original built `{key for key in row.items() if key == field_name}`,
            # which compares (key, value) tuples to a string and was always empty;
            # that dead debug code is removed and the field read directly.
            total += int(row[field_name])
            count += 1
    if count == 0:
        raise ValueError("no data rows in " + str(filename))
    return "The % Complete on High-Rated items is : " + str(total / count) + "%"
print(get_avg('% Complete'))
# get unique Item count from a given column
def get_unique(field_name, filename=None):
    """Count and print the number of distinct values in *field_name*.

    Args:
        field_name: column name to scan.
        filename: CSV file to read; defaults to the module-level ``name``
            (today's gaps file) so existing callers are unchanged.

    Returns:
        The number of distinct values (the original returned None; the
        count is now returned as well, which is backward compatible).
    """
    if filename is None:
        filename = name  # module-level daily output file
    with open(filename, "r") as output:
        # A set gives O(1) membership tests instead of the original
        # O(n) list scan per row.
        seen = set()
        for row in csv.DictReader(output):
            seen.add(row[field_name])
    count = len(seen)
    print("There are {} unique values in this field" .format(count))
    return count
get_unique('LOB')
# count the number of times an item appears in a row
# also get % across total population for each
# (the duplicate `import csv` that was here is removed; csv is imported at the top of the file)
ref_count = {}  # value -> occurrence count for the selected column
ref_orig = []   # every data row read; its length is the total population

def group_me(field_num, path=None):
    """Tally how often each value appears in column *field_num* of a gaps CSV.

    Results accumulate in the module-level ``ref_count`` / ``ref_orig``
    (calling twice adds to the existing tallies, as before).

    Args:
        field_num: zero-based column index to group by.
        path: CSV file to read; defaults to the module-level ``name``.
            The original hard-coded 'gaps_2019-01-31.csv', which only
            matches the file this script writes on that one date — a
            stale path bug.
    """
    if path is None:
        path = name  # today's gaps file, written by create_file above
    with open(path, "r") as output:
        rows = csv.reader(output)
        next(rows, None)  # skip the header row
        for row in rows:
            ref_orig.append(row)
            ref_count[row[field_num]] = ref_count.get(row[field_num], 0) + 1
group_me(2) # caller - get specific row #2
#prints out results for selected row, gets counts and percentages
print("-" * 60)
# ref_count/ref_orig were populated by group_me above; each item's share is
# its count over the total number of data rows read, rounded to one decimal.
for k, v in ref_count.items():
    print("Item: {0:12} Count: {1} - {2}%" .format( k, v, round((v/len(ref_orig) * 100),1)))
print("-" * 60)
print("Total Population = ", len(ref_orig))
print("-" * 60)