
Analyzing Tens of Millions of Records in MongoDB
 
1. Import
 
Listing 1:
 
Read the CSV files and store them into the database
 
# -*- coding: UTF-8 -*-
'''
Created on 2013-10-20

@author: tyk
'''
from pymongo.connection import Connection
from time import time
import codecs
import csv
import os

rootdir = "2000W/"   # directory to be walked

def process_data():
    conn = Connection('localhost', 27017)  # get a connection
    ##conn.drop_database('guestHouse')
    db = conn.TYK
    guest = db.guestHouse

    guest_info = []
    for parent, dirnames, filenames in os.walk(rootdir):  # yields: 1. parent dir 2. sub-directory names (no path) 3. file names
        for filename in filenames:
            ErrorLine = []
            key_length = 0
            fullname = os.path.join(parent, filename)
            try:
                #with codecs.open(fullname, encoding='utf_8') as file:
                with codecs.open(fullname, encoding='utf_8_sig') as file:  # skip the BOM at the start of the UTF-8 file
                    keys = file.readline().split(',')  # consume the header line first
                    key_length = len(keys)
                    spamreader = csv.reader(file)  # read as CSV; each row comes back as a list, not a str
                    for line in spamreader:
                        if key_length != len(line):  # some rows are incomplete, record them
                            ErrorLine.append(line)
                        else:
                            each_info = {}
                            for i in range(1, len(keys)):  # skip the first field, Name, which is not stored in the database
                                each_info[keys[i]] = line[i]

                            guest_info.append(each_info)
                            if len(guest_info) == 10000:  # insert once every 10000 records
                                guest.insert(guest_info)
                                guest_info = []

            except Exception, e:
                print filename + "\t" + str(e)

            # write out all the bad rows in one go
            with open('ERR/' + os.path.splitext(filename)[0] + '-ERR.csv', 'w') as log_file:
                spamwriter = csv.writer(log_file)
                for line in ErrorLine:
                    spamwriter.writerow(line)
    # the final batch
    guest.insert(guest_info)

if __name__ == '__main__':
    start = time()
    process_data()
    stop = time()
    print(str(stop - start) + " seconds")
I fell asleep afterwards and the machine was shut down, so how long it actually took remains unknown ⊙﹏⊙b
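A side note on the driver API: pymongo.connection.Connection comes from very old pymongo releases and has since been removed. A minimal sketch of the same batched-insert idea against the current API (MongoClient and insert_many; the batch size and the TYK/guestHouse names simply mirror Listing 1) might look like this:

from pymongo import MongoClient

def insert_batched(rows, batch_size=10000):
    """rows: an iterable of dicts built from the CSV lines, as in Listing 1."""
    client = MongoClient('localhost', 27017)
    guest = client.TYK.guestHouse
    batch = []
    for row in rows:
        batch.append(row)
        if len(batch) == batch_size:   # flush every batch_size documents
            guest.insert_many(batch)   # insert_many replaces the legacy insert() for bulk writes
            batch = []
    if batch:                          # don't forget the final, partial batch
        guest.insert_many(batch)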
 
Summary:
 
1. The files are UTF-8 encoded, so they cannot simply be opened and read with a plain open().
 
2. The files are stored in CSV format, so use the csv module to read them; each row then comes back as a list. Note that you cannot simply split each line on "," -- data of the shape "a,b,c", d cannot be parsed correctly that way (see the short sketch below).
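To illustrate the point, a tiny standalone sketch (not part of the original import script) comparing the two approaches:

import csv

row = '"a,b,c", d'
print row.split(',')           # naive split: ['"a', 'b', 'c"', ' d'] -- four pieces, quoting broken
print next(csv.reader([row]))  # csv module:  ['a,b,c', ' d']         -- two fields, as intended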
 
3. For UTF-8 files that start with a BOM, read them with the 'utf_8_sig' encoding, which skips the BOM. If the BOM is not handled, it gets stored in the database along with the data, producing keys that look like " XXX" (the illusion of a leading space).
 
If the data has already been stored that way, the only option left is to rename the key (in the legacy shell the trailing false, true arguments are upsert and multi, so the rename is applied to every document):
 
db.guestHouse.update({}, {"$rename" : {" Name" : "Name"}}, false, true)
There is also another approach floating around online (I tried it and it failed; the reason is most likely that the string needs to be converted to bytes before the comparison, and I don't yet know how to do that...)
 
#with codecs.open(fullname, encoding='utf-8') as file:
with codecs.open(fullname, encoding='utf_8_sig') as file:
    keys = file.readline().split(',')
    if keys[0][:3] == codecs.BOM_UTF8:  # compare keys[0] against the BOM (this is where it fails)
        keys[0] = keys[0][3:]
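The comparison above fails because codecs.open() returns unicode strings while codecs.BOM_UTF8 is a byte string ('\xef\xbb\xbf'), so the two sides can never be equal. A minimal sketch of the comparison done on matching types (my own reading of the problem, not the original author's fix):

import codecs

with codecs.open(fullname, encoding='utf-8') as file:  # fullname as in Listing 1
    keys = file.readline().split(',')
    # codecs.open() gives back unicode text, so decode the BOM constant first (it becomes u'\ufeff')...
    if keys[0][:1] == codecs.BOM_UTF8.decode('utf-8'):
        keys[0] = keys[0][1:]
    # ...or, as suggested above, encode the key back to bytes before comparing:
    # if keys[0].encode('utf-8').startswith(codecs.BOM_UTF8):
    #     keys[0] = keys[0][1:]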
Extension:
 
Today I discovered that MongoDB ships with its own import tool, mongoimport, which can import CSV files directly...
 
A quick test:
 
1. Import directly, without filtering out bad rows. The experiment uses the patent citation data (the sample data set from the book Hadoop: The Definitive Guide).
 
Test data:
 
"PATENT","GYEAR","GDATE","APPYEAR","COUNTRY","POSTATE","ASSIGNEE","ASSCODE","CLAIMS","NCLASS","CAT","SUBCAT","CMADE","CRECEIVE","RATIOCIT","GENERAL","ORIGINAL","FWDAPLAG","BCKGTLAG","SELFCTUB","SELFCTLB","SECDUPBD","SECDLWBD"
3070801,1963,1096,,"BE","",,1,,269,6,69,,1,,0,,,,,,,
3070802,1963,1096,,"US","TX",,1,,2,6,63,,0,,,,,,,,,
3070803,1963,1096,,"US",
"IL",,1,,2,6,63,,9,,0.3704,,,,,,,
3070804,1963,1096,,"US","OH",,1,,2,6,63,,3,,0.6667,,,,,,,
3070805,1963,1096,,"US","CA",,1,,2,6,63,,1,,0,,,,,,,
3070806,1963,1096,,"US","PA",,1,,2,6,63,,0,,,,,,,,,
3070807,1963,1096,,"US","OH",,1,,623,3,39,,3,,0.4444,,,,,,,
3070808,1963,1096,,"US","IA",,1,,623,3,39,,4,,0.375,,,,,,,
3070809,1963,1096,,,,1,,4,6,65,,0,,,,,,,,,
mongoimport -d TYK -c guest --type csv --file d:\text.csv --headerline
11 lines in total: the first line is the header, followed by 9 records. The 3rd record is cut off in the middle, and the 9th record has the two values "US","AZ" removed from the middle. Going by CSV rules this should now parse as 10 records.
Result:
 
> db.guest.find({}, {"PATENT" : 1, "_id" : 1})
{ "_id" : ObjectId("52692c2a0b082a1bbb727d86"), "PATENT" : 3070801 }
{ "_id" : ObjectId("52692c2a0b082a1bbb727d87"), "PATENT" : 3070802 }
{ "_id" : ObjectId("52692c2a0b082a1bbb727d88"), "PATENT" : 3070803 }
{ "_id" : ObjectId("52692c2a0b082a1bbb727d89"), "PATENT" : "IL" }
{ "_id" : ObjectId(