
Flume writing to Hadoop HDFS fails with "Too many open files"

Symptoms:
   [hadoop@dtydb6 logs]$ vi hadoop-hadoop-datanode-dtydb6.log
 
        at java.io.RandomAccessFile.<init>(RandomAccessFile.java:233)
        at org.apache.hadoop.hdfs.server.datanode.FSDataset.getBlockInputStream(FSDataset.java:1094)
        at org.apache.hadoop.hdfs.server.datanode.BlockSender.<init>(BlockSender.java:168)
        at org.apache.hadoop.hdfs.server.datanode.BlockSender.<init>(BlockSender.java:81)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.verifyBlock(DataBlockScanner.java:453)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.verifyFirstBlock(DataBlockScanner.java:519)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.run(DataBlockScanner.java:617)
        at java.lang.Thread.run(Thread.java:722)
 
2013-02-17 00:00:29,023 WARN org.apache.hadoop.hdfs.server.datanode.DataBlockScanner: Second Verification failed for blk_1408462853104263034_39617. Exception : java.io.FileNotFoundException: /hadoop/logdata/current/subdir2/subdir2/blk_1408462853104263034 (Too many open files)
        at java.io.RandomAccessFile.open(Native Method)
        at java.io.RandomAccessFile.<init>(RandomAccessFile.java:233)
        at org.apache.hadoop.hdfs.server.datanode.FSDataset.getBlockInputStream(FSDataset.java:1094)
        at org.apache.hadoop.hdfs.server.datanode.BlockSender.<init>(BlockSender.java:168)
        at org.apache.hadoop.hdfs.server.datanode.BlockSender.<init>(BlockSender.java:81)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.verifyBlock(DataBlockScanner.java:453)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.verifyFirstBlock(DataBlockScanner.java:519)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.run(DataBlockScanner.java:617)
        at java.lang.Thread.run(Thread.java:722)
 
2013-02-17 00:00:29,023 INFO org.apache.hadoop.hdfs.server.datanode.DataBlockScanner: Reporting bad block blk_1408462853104263034_39617 to namenode.
2013-02-17 00:00:53,076 WARN org.apache.hadoop.hdfs.server.datanode.DataBlockScanner: First Verification failed for blk_4328439663130931718_44579. Exception : java.io.FileNotFoundException: /hadoop/logdata/current/subdir9/subdir12/blk_4328439663130931718 (Too many open files)
        at java.io.RandomAccessFile.open(Native Method)
        at java.io.RandomAccessFile.<init>(RandomAccessFile.java:233)
        at org.apache.hadoop.hdfs.server.datanode.FSDataset.getBlockInputStream(FSDataset.java:1094)
        at org.apache.hadoop.hdfs.server.datanode.BlockSender.<init>(BlockSender.java:168)
        at org.apache.hadoop.hdfs.server.datanode.BlockSender.<init>(BlockSender.java:81)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.verifyBlock(DataBlockScanner.java:453)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.verifyFirstBlock(DataBlockScanner.java:519)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.run(DataBlockScanner.java:617)
        at java.lang.Thread.run(Thread.java:722)
 
2013-02-17 00:00:53,077 WARN org.apache.hadoop.hdfs.server.datanode.DataBlockScanner: Second Verification failed for blk_4328439663130931718_44579. Exception : java.io.FileNotFoundException: /hadoop/logdata/current/subdir9/subdir12/blk_4328439663130931718 (Too many open files)
        at java.io.RandomAccessFile.open(Native Method)
        at java.io.RandomAccessFile.<init>(RandomAccessFile.java:233)
        at org.apache.hadoop.hdfs.server.datanode.FSDataset.getBlockInputStream(FSDataset.java:1094)
        at org.apache.hadoop.hdfs.server.datanode.BlockSender.<init>(BlockSender.java:168)
        at org.apache.hadoop.hdfs.server.datanode.BlockSender.<init>(BlockSender.java:81)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.verifyBlock(DataBlockScanner.java:453)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.verifyFirstBlock(DataBlockScanner.java:519)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.run(DataBlockScanner.java:617)
        at java.lang.Thread.run(Thread.java:722)
 
2013-02-17 00:00:53,077 INFO org.apache.hadoop.hdfs.server.datanode.DataBlockScanner: Reporting bad block blk_4328439663130931718_44579 to namenode.
2013-02-17 00:01:10,115 WARN org.apache.hadoop.hdfs.server.datanode.DataBlockScanner: First Verification failed for blk_2833765807455012512_10228. Exception : java.io.FileNotFoundException: /hadoop/logdata/current/subdir63/subdir25/blk_2833765807455012512 (Too many open files)
        at java.io.RandomAccessFile.open(Native Method)
        at java.io.RandomAccessFile.<init>(RandomAccessFile.java:233)
        at org.apache.hadoop.hdfs.server.datanode.FSDataset.getBlockInputStream(FSDataset.java:1094)
        at org.apache.hadoop.hdfs.server.datanode.BlockSender.<init>(BlockSender.java:168)
        at org.apache.hadoop.hdfs.server.datanode.BlockSender.<init>(BlockSender.java:81)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.verifyBlock(DataBlockScanner.java:453)
        at org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.verifyFirstBlock(DataBlockScanner.java:519)
After searching online, I suspected the Linux nofile (max open files) limit had been exceeded; on this host it is still at the default of 1024:
[hadoop@dtydb6 logs]$ ulimit -a
core file size          (blocks, -c) 0
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 1064960
max locked memory       (kbytes, -l) 32
max memory size         (kbytes, -m) unlimited
open files                      (-n) 1024
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 10240
cpu time               (seconds, -t) unlimited
max user processes              (-u) 106
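
The open files limit of 1024 matches the suspicion. Before raising it, it is worth confirming that the DataNode process really is exhausting its descriptors. A minimal check (finding the pid via jps is just one option; counting the entries under /proc/<pid>/fd gives the descriptors currently open):

$ DN_PID=$(jps | awk '/DataNode/ {print $1}')    # pid of the DataNode JVM
$ ls /proc/$DN_PID/fd | wc -l                    # descriptors open right now
$ grep 'open files' /proc/$DN_PID/limits         # limit the running process actually has

A count close to 1024 confirms that the default nofile limit is the bottleneck.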
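Resolution: raise the nofile limit for the hadoop user and restart the DataNode so the new value takes effect. A sketch, assuming the limits are managed through /etc/security/limits.conf; the value 65535 is a common choice, not taken from the original setup:

# As root, append to /etc/security/limits.conf:
hadoop  soft  nofile  65535
hadoop  hard  nofile  65535

# Log the hadoop user out and back in (PAM applies limits.conf at login),
# verify, then restart the DataNode:
$ ulimit -n                        # should now report 65535
$ hadoop-daemon.sh stop datanode
$ hadoop-daemon.sh start datanode

The limit is read at process start, so a DataNode that was already running keeps the old 1024 until it is restarted.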