Data Mining Notes - Clustering - DBSCAN - Document Clustering


For the principles of the DBSCAN algorithm, see
Data Mining Notes - Clustering - DBSCAN - Principle and a Simple Implementation
This post applies DBSCAN to cluster a collection of documents. The documents are first vectorized, here as TF-IDF weight vectors, and cosine distance is chosen to measure the distance between documents; the remaining steps are the same as in the basic algorithm. The first clustering run produced unsatisfactory results, but after reducing the dimensionality of the data the results turned out well.
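
Before the listing, one note: the code below relies on a DistanceUtils.cosine helper that is not shown in the post. A minimal sketch of it, assuming the TF-IDF vectors are stored as word-to-weight maps (this implementation is my assumption, not the author's code), could look like this:

import java.util.Map;

public class DistanceUtils {

	// cosine similarity of two sparse TF-IDF vectors stored as word -> weight maps;
	// larger values mean more similar documents
	public static double cosine(Map<String, Double> a, Map<String, Double> b) {
		double dot = 0.0;
		double normA = 0.0;
		double normB = 0.0;
		for (Map.Entry<String, Double> entry : a.entrySet()) {
			normA += entry.getValue() * entry.getValue();
			Double other = b.get(entry.getKey());
			if (other != null) {
				// the word occurs in both documents: contributes to the dot product
				dot += entry.getValue() * other;
			}
		}
		for (double value : b.values()) {
			normB += value * value;
		}
		if (normA == 0.0 || normB == 0.0) {
			return 0.0;
		}
		return dot / (Math.sqrt(normA) * Math.sqrt(normB));
	}
}

Note that this returns a similarity rather than a true distance, which is consistent with the distance > EPSILON test in obtainNeighbors below: a point counts as a neighbor when it is similar enough.
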
The Java implementation is as follows.
public class DocDBScanBuilder {
	
	// neighborhood radius eps, used here as a cosine-similarity threshold
	public static double EPSILON = 0.04;
	// minimum number of neighbors required for a point to count as a core point
	public static int MIN_POINTS = 15;
	
	// load the documents, compute their TF-IDF vectors and wrap each one as a DataPoint
	public List<DataPoint> initData() {
		List<DataPoint> dataPoints = new ArrayList<DataPoint>();
		try {
			String path = DocDBScanBuilder.class.getClassLoader().getResource("  ").toURI().getPath();
			DocumentSet documentSet = DocumentLoader.loadDocumentSetByThread(path);
			List<Document> documents = documentSet.getDocuments();
			DocumentUtils.calculateTFIDF_0(documents);
			for(Document doc : documents) {
				DataPoint dataPoint = new DataPoint();
				dataPoint.setValues(doc.getTfidfWords());
				dataPoint.setCategory(doc.getCategory());
				dataPoints.add(dataPoint);
			}
		} catch (URISyntaxException e) {
			e.printStackTrace();
		}
		return dataPoints;
	}
	
	// collect all points that fall inside the eps neighborhood of the current point
	public List<DataPoint> obtainNeighbors(DataPoint current, List<DataPoint> points) {
		List<DataPoint> neighbors = new ArrayList<DataPoint>();
		for (DataPoint point : points) {
			double distance = DistanceUtils.cosine(current.getValues(), point.getValues());
			// cosine is treated here as a similarity (larger = closer);
			// with a true distance the test would instead be distance < EPSILON
			if (distance > EPSILON) {
				neighbors.add(point);
			}
		}
		return neighbors;
	}
	
	public void mergeCluster(DataPoint point, List<DataPoint> neighbors,
			int clusterId, List<DataPoint> points) {
		point.setClusterId(clusterId);
		for (DataPoint neighbor : neighbors) {
			// a point that has already been visited has had its neighborhood
			// examined, so only unvisited neighbors need to be expanded here
			if (!neighbor.isAccessed()) {
				neighbor.setAccessed(true);
				List<DataPoint> nneighbors = obtainNeighbors(neighbor, points);
				// the neighbor is itself a core point: pull its unassigned
				// neighbors into the current cluster (note: this expands only
				// one level instead of growing a seed list as textbook DBSCAN does)
				if (nneighbors.size() >= MIN_POINTS) {
					for (DataPoint nneighbor : nneighbors) {
						if (nneighbor.getClusterId() <= 0) {
							nneighbor.setClusterId(clusterId);
						}
					}
				}
			}
			// border point: attach it to the current cluster if it is still unassigned
			if (neighbor.getClusterId() <= 0) {
				neighbor.setClusterId(clusterId);
			}
		}
	}
	
	public void cluster(List<DataPoint> points) {
		// clusterId: 0 means not yet assigned to any cluster,
		// noise points are marked with -1
		int clusterId = 0;
		// a single pass over the points is sufficient: each point is either
		// expanded here or marked as visited inside mergeCluster
		// (the original while/flag loop ran exactly one pass anyway and would
		// spin forever on an empty list, so it is dropped)
		for (DataPoint point : points) {
			if (point.isAccessed()) {
				continue;
			}
			point.setAccessed(true);
			List<DataPoint> neighbors = obtainNeighbors(point, points);
			System.out.println("----------------------------neighbors: " + neighbors.size());
			if (neighbors.size() >= MIN_POINTS) {
				// core point: reuse its existing cluster id, otherwise start a new cluster
				clusterId = point.getClusterId() <= 0 ? (++clusterId) : point.getClusterId();
				System.out.println("--------------------------------clusterId: " + clusterId);
				mergeCluster(point, neighbors, clusterId, points);
			} else {
				// not a core point: mark as noise unless a previous expansion
				// has already assigned it to a cluster
				if (point.getClusterId() <= 0) {
					point.setClusterId(-1);
				}
			}
		}
	}
	
	// sort map entries by value in descending order; NaN values are reset to 0 first
	public List<Map.Entry<String, Double>> sortMap(Map<String, Double> map) {
		List<Map.Entry<String, Double>> list = 
				new ArrayList<Map.Entry<String, Double>>(map.entrySet());
		Collections.sort(list, new Comparator<Map.Entry<String, Double>>() {
			@Override
			public int compare(Entry<String, Double> o1,
					Entry<String, Double> o2) {
				if (o1.getValue().isNaN()) {
					o1.setValue(0.0);
				}
				if (o2.getValue().isNaN()) {
					o2.setValue(0.0);
				}
				return -o1.getValue().compareTo(o2.getValue());
			}
		});
		return list;
	}
	
	// print the points sorted by cluster id together with their original category labels
	public void print(List<DataPoint> points) {
		Collections.sort(points, new Comparator<DataPoint>() {
			@Override
			public int compare(DataPoint o1, DataPoint o2) {
				return Integer.valueOf(o1.getClusterId()).compareTo(o2.getClusterId());
			}
		});
		for (DataPoint point : points) {
			System.out.println(point.getClusterId() + " - " + point.getCategory());
		}
	}

	public void run() {
		List<DataPoint> points = initData();
		cluster(points);
		print(points);
	}
	
	public static void main(String[] args) {
		new DocDBScanBuilder().run();
	}
	
}
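
The DataPoint class is not listed in the post either; a minimal sketch of the fields and accessors the clustering code relies on (reconstructed from how the class is used above, so an assumption) might look like this:

import java.util.Map;

public class DataPoint {

	// TF-IDF vector of the document: word -> weight
	private Map<String, Double> values;
	// original category label, kept only to inspect the clustering result
	private String category;
	// 0 = not yet assigned, -1 = noise, positive values = cluster id
	private int clusterId;
	// whether the point has already been visited by the algorithm
	private boolean accessed;

	public Map<String, Double> getValues() { return values; }
	public void setValues(Map<String, Double> values) { this.values = values; }
	public String getCategory() { return category; }
	public void setCategory(String category) { this.category = category; }
	public int getClusterId() { return clusterId; }
	public void setClusterId(int clusterId) { this.clusterId = clusterId; }
	public boolean isAccessed() { return accessed; }
	public void setAccessed(boolean accessed) { this.accessed = accessed; }
}
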
The TF-IDF computation lives in DocumentUtils; the post lists two variants, and calculateTFIDF_0 is the one used above.
/**
 * Compute TF-IDF.
 * TF = term count divided by the total number of words in the document.
 * @param documents
 */
public static void calculateTFIDF_0(List<Document> documents) {
	int docTotalCount = documents.size();
	for (Document document : documents) {
		Map<String, Double> tfidfWords = document.getTfidfWords();
		int wordTotalCount = document.getWords().length;
		Map<String, Integer> docWords = DocumentHelper.wordsInDocStatistics(document);
		for (String word : docWords.keySet()) {
			double wordCount = docWords.get(word);
			double tf = wordCount / wordTotalCount;
		// +1 smooths the document frequency and avoids division by zero
		double docCount = DocumentHelper.wordInDocsStatistics(word, documents) + 1;
			double idf = Math.log(docTotalCount / docCount);
			double tfidf = tf * idf;
			tfidfWords.put(word, tfidf);
		}
		System.out.println("doc " + document.getName() + " calculate tfidf finish");
	}
}
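
DocumentHelper.wordsInDocStatistics and DocumentHelper.wordInDocsStatistics are not shown in the post; judging from how they are called (and from Document.getWords() returning the tokenized word array), plausible sketches, again assumptions rather than the original code, are:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DocumentHelper {

	// count how often each word occurs inside a single document
	public static Map<String, Integer> wordsInDocStatistics(Document document) {
		Map<String, Integer> counts = new HashMap<String, Integer>();
		for (String word : document.getWords()) {
			Integer count = counts.get(word);
			counts.put(word, count == null ? 1 : count + 1);
		}
		return counts;
	}

	// count in how many documents of the collection the given word occurs
	public static int wordInDocsStatistics(String word, List<Document> documents) {
		int count = 0;
		for (Document document : documents) {
			for (String w : document.getWords()) {
				if (word.equals(w)) {
					count++;
					break;
				}
			}
		}
		return count;
	}
}

As a quick sanity check of calculateTFIDF_0: a word that occurs 3 times in a 100-word document and appears in 10 of 1000 documents gets tf = 3/100 = 0.03 and idf = ln(1000/(10+1)) ≈ 4.51, so its tfidf is roughly 0.135.
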
	
/**
 * Compute TF-IDF.
 * TF = term count divided by the largest weight currently stored for the document
 * (a max-frequency style normalization).
 * @param documents
 */
public static void calculateTFIDF_1(List<Document> documents) {
	int docTotalCount = documents.size();
	for (Document document : documents) {
		Map<String, Double> tfidfWords = document.getTfidfWords();
		List<Map.Entry<String, Double>> list = 
				new ArrayList<Map.Entry<String, Double>>(tfidfWords.entrySet());
		Collections.sort(list, new Comparator<Map.Entry<String, Double>>() {
			@Override
			public int compare(Entry<String, Double> o1,
					Entry<String, Double> o2) {
				return -o1.getValue().compareTo(o2.getValue());
			}
		});
		if (list.size() == 0) continue;
		// normalize by the largest value currently stored in tfidfWords; this assumes
		// tfidfWords already holds per-word weights (e.g. raw frequencies) before this call
		double wordTotalCount = list.get(0).getValue();
		Map<String, Integer> docWords = DocumentHelper.wordsInDocStatistics(document);
		for (String word : docWords.keySet()) {
			double wordCount = docWords.get(word);
			double tf = wordCount / wordTotalCount;
			double docCount = DocumentHelper.wordInDocsStatistics(word, documents) + 1;
			double idf = Math.log(docTotalCount / docCount);
			double tfidf = tf * idf;
			tfidfWords.put(word, tfidf);
		}
		System.out.println("doc " + document.getName() + " calculate tfidf finish");
	}
}
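
The introduction mentions that the results only became good after reducing the dimensionality of the data, but the post does not show that step. One simple option, sketched here as an assumption rather than the author's method, is to keep only the k highest-weighted TF-IDF terms of each document before clustering:

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DimensionReducer {

	// keep only the k highest-weighted terms of each document's TF-IDF vector;
	// a simple feature-selection style reduction, not necessarily what the
	// original experiment used
	public static void keepTopK(List<DataPoint> points, int k) {
		for (DataPoint point : points) {
			List<Map.Entry<String, Double>> entries =
					new ArrayList<Map.Entry<String, Double>>(point.getValues().entrySet());
			// sort entries by weight in descending order
			Collections.sort(entries, new Comparator<Map.Entry<String, Double>>() {
				@Override
				public int compare(Map.Entry<String, Double> o1, Map.Entry<String, Double> o2) {
					return -o1.getValue().compareTo(o2.getValue());
				}
			});
			Map<String, Double> reduced = new HashMap<String, Double>();
			for (int i = 0; i < Math.min(k, entries.size()); i++) {
				reduced.put(entries.get(i).getKey(), entries.get(i).getValue());
			}
			point.setValues(reduced);
		}
	}
}

Calling it between initData() and cluster(), for example DimensionReducer.keepTopK(points, 200) (the value 200 is arbitrary), shrinks the vectors that every cosine computation has to walk over.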

Code repository: https://github.com/fighting-one-piece/repository-datamining.git