Hadoop NameNode Startup: Loading the FSImage (Part 2)
The previous post analyzed the series of checks that run before the FSImage is loaded during startup; this time we look at the part that actually loads it. Here we can see the read operations performed on the file, and from those operations we can work out both the file's on-disk layout and the corresponding in-memory structure. Personally, I get the impression that HDFS runs this many checks at startup out of concern for safety and recoverability. The loading itself, however, is just reading a file, so the flow is easy to follow: anyone familiar with Java file I/O can understand this function more or less at a glance. Suppose the cluster contains the following entries:
HDFS://192.168.0.1:9000/a.txt
HDFS://192.168.0.1:9000/test/b.txt
There are only two actual files, but the counted number of "files" is four: the root directory, the test directory, a.txt, and b.txt. Directories are counted as INodes too.
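To make the counting concrete, here is a minimal, self-contained sketch (plain Java, not Hadoop code; the two paths are just the hypothetical ones above) that counts every path component the way numFiles counts INodes, directories included:

import java.util.LinkedHashSet;
import java.util.Set;

public class InodeCount {
  public static void main(String[] args) {
    String[] paths = { "/a.txt", "/test/b.txt" };
    Set<String> inodes = new LinkedHashSet<String>();
    inodes.add("/"); // the root directory is an INode too
    for (String p : paths) {
      // every proper prefix ending before a '/' is a directory INode
      int idx = 0;
      while ((idx = p.indexOf('/', idx + 1)) != -1) {
        inodes.add(p.substring(0, idx)); // e.g. "/test"
      }
      inodes.add(p); // the file itself, e.g. "/test/b.txt"
    }
    System.out.println(inodes.size() + " INodes: " + inodes);
  }
}

Running it prints "4 INodes: [/, /a.txt, /test, /test/b.txt]", matching the count above.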
FSImage.java
boolean loadFSImage(File curFile) throws IOException {
  assert this.getLayoutVersion() < 0 : "Negative layout version is expected.";
  assert curFile != null : "curFile is null";

  FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
  FSDirectory fsDir = fsNamesys.dir;
  //
  // Load in bits
  //
  boolean needToSave = true;
  // open the image file with a buffered DataInputStream
  DataInputStream in = new DataInputStream(new BufferedInputStream(
                           new FileInputStream(curFile)));
  try {
    /*
     * Note: Remove any checks for version earlier than
     * Storage.LAST_UPGRADABLE_LAYOUT_VERSION since we should never get
     * to here with older images.
     */

    /*
     * TODO we need to change format of the image file
     * it should not contain version and namespace fields
     */
    // read image version: first appeared in version -1
    // the layout version is a negative number; newer layouts are more negative
    int imgVersion = in.readInt();
    // read namespaceID: first appeared in version -2
    this.namespaceID = in.readInt();

    // read number of files
    long numFiles;
    if (imgVersion <= -16) {
      numFiles = in.readLong();
    } else {
      numFiles = in.readInt();
    }
    this.layoutVersion = imgVersion;
    // read in the last generation stamp.
    if (imgVersion <= -12) {
      long genstamp = in.readLong();
      fsNamesys.setGenerationStamp(genstamp);
    }

    // an image written with an old layout version must be re-saved
    needToSave = (imgVersion != FSConstants.LAYOUT_VERSION);

    // read file info
    short replication = FSNamesystem.getFSNamesystem().getDefaultReplication();

    LOG.info("Number of files = " + numFiles);

    String path;
    String parentPath = "";
    INodeDirectory parentINode = fsDir.rootDir;
    for (long i = 0; i < numFiles; i++) {
      long modificationTime = 0;
      long atime = 0;
      long blockSize = 0;
      // read the full path of this inode, e.g. "/a.txt"
      path = readString(in);
      replication = in.readShort();
      // clamp the replication factor into the configured
      // [minReplication, maxReplication] range
      replication = FSEditLog.adjustReplication(replication);
      modificationTime = in.readLong();
      if (imgVersion <= -17) {
        atime = in.readLong();
      }
      if (imgVersion <= -8) {
        blockSize = in.readLong();
      }
      // read the number of blocks belonging to this inode
      int numBlocks = in.readInt();
      Block blocks[] = null;

      // for older versions, a blocklist of size 0
      // indicates a directory.
      if ((-9 <= imgVersion && numBlocks > 0) ||
          (imgVersion < -9 && numBlocks >= 0)) {
        blocks = new Block[numBlocks];
        for (int j = 0; j < numBlocks; j++) {
          blocks[j] = new Block();
          if (-14 < imgVersion) {
            blocks[j].set(in.readLong(), in.readLong(),
                          Block.GRANDFATHER_GENERATION_STAMP);
          } else {
            // read the block's id, length and generation stamp. Note that the
            // fsimage does not record which DATANODE holds each block:
            // DATANODEs report their blocks to the namenode at startup, so
            // blocks can move between DATANODEs (e.g. during a rebalance)
            // without the fsimage having to change; the namenode rebuilds
            // the block-to-DATANODE map in memory from those reports.
            blocks[j].readFields(in);
          }
        }
      }
      // Older versions of HDFS do not store the block size in the inode.
      // If the file has more than one block, use the size of the
      // first block as the blocksize. Otherwise use the default block size.
      if (-8 <= imgVersion && blockSize == 0) {
        if (numBlocks > 1) {
          blockSize = blocks[0].getNumBytes();
        } else {
          long first = ((numBlocks == 1) ? blocks[0].getNumBytes() : 0);
          blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first);
        }
      }

      // quotas are stored only for directories (blocks == null)
      long nsQuota = -1L;
      if (imgVersion <= -16 && blocks == null) {
        nsQuota = in.readLong();
      }
      long dsQuota = -1L;
      if (imgVersion <= -18 && blocks == null) {
        dsQuota = in.readLong();
      }

      PermissionStatus permissions = fsNamesys.getUpgradePermission();
      if (imgVersion <= -11) {
        // newer layouts store a PermissionStatus: owner, group and the
        // rwx mode bits, much like Linux file permissions
        permissions = PermissionStatus.read(in);
      }
      // the root directory already exists in FSDirectory,
      // so for it only the attributes are updated
      if (path.length() == 0) { // it is the root
        // update the root's attributes
        if (nsQuota != -1 || dsQuota != -1) {
          fsDir.rootDir.setQuota(nsQuota, dsQuota);
        }
        fsDir.rootDir.setModificationTime(modificationTime);
        fsDir.rootDir.setPermissionStatus(permissions);
        continue;
      }
      // check if the new inode belongs to the same parent
      if (!isParent(path, parentPath)) {
        parentINode = null;
        parentPath = getParent(path);
      }
      // insert the inode under its parent, building the in-memory tree
      parentINode = fsDir.addToParent(path, parentINode, permissions,
                                      blocks, replication, modificationTime,
                                      atime, nsQuota, dsQuota, blockSize);
    }

    // load datanode info: kept for compatibility with old layouts; in 1.0.4
    // DATANODE information is no longer stored in the fsimage, so for
    // current images this reads nothing
    this.loadDatanodes(imgVersion, in);

    // load Files Under Construction
    this.loadFilesUnderConstruction(imgVersion, in, fsNamesys);

    // load the state of the delegation token secret manager
    this.loadSecretManagerState(imgVersion, in, fsNamesys);
  } finally {
    in.close();
  }
  return needToSave;
}
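As a quick experiment, the fixed header of an image file can be dumped outside the NameNode with the same sequence of reads used above. Below is a minimal sketch, assuming imgVersion <= -16 (a relatively new layout), in which case numFiles is a long and the generation stamp is always present; the path argument is a placeholder for wherever your fsimage lives:

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

public class FsImageHeaderDump {
  public static void main(String[] args) throws IOException {
    // args[0]: path to an fsimage file, e.g. ${dfs.name.dir}/current/fsimage
    DataInputStream in = new DataInputStream(new BufferedInputStream(
        new FileInputStream(args[0])));
    try {
      int imgVersion = in.readInt();   // layout version, a negative number
      int namespaceID = in.readInt();  // identifies this namespace
      long numFiles = in.readLong();   // a long only when imgVersion <= -16
      long genStamp = in.readLong();   // present only when imgVersion <= -12
      System.out.println("layoutVersion = " + imgVersion);
      System.out.println("namespaceID   = " + namespaceID);
      System.out.println("numFiles      = " + numFiles);
      System.out.println("genStamp      = " + genStamp);
    } finally {
      in.close();
    }
  }
}

Everything after the generation stamp is the per-inode data walked by the loop in loadFSImage, so a full parser would simply continue with the same sequence of reads.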