Testing a single MNIST image with a Caffe-trained lenet_iter_10000.caffemodel


This post continues from the previous one, http://blog.csdn.net/lanxuecc/article/details/52474476, where the deploy prototxt for LeNet-5 was generated. Below is a concrete record of using lenet_iter_10000.caffemodel to classify a single image.
Prepare a handwritten digit image
(image: the handwritten test digit, saved as ../examples/mnist/4.jpg)
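If no suitable image is at hand, one digit can be exported from the MNIST test set that the Caffe example already downloads. This is only a convenience sketch: the path data/mnist/t10k-images-idx3-ubyte (fetched by data/mnist/get_mnist.sh), the use of Pillow, and the output name test_digit.jpg are assumptions to adapt to your setup.

# Sketch: dump one digit from the MNIST test set as a JPEG test image.
import numpy as np
from PIL import Image

index = 0  # which test image to export
with open('data/mnist/t10k-images-idx3-ubyte', 'rb') as f:
    f.seek(16 + index * 28 * 28)  # skip the 16-byte IDX header, then whole 28x28 images
    raw = np.frombuffer(f.read(28 * 28), dtype=np.uint8)

Image.fromarray(raw.reshape(28, 28)).save('test_digit.jpg')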
Take care that the deploy prototxt specifies the correct number of input channels for the image.
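For a grayscale MNIST input this means a single channel. As a reference point (check it against the deploy.prototxt generated in the previous post), the old-style input declaration for a 1x1x28x28 blob looks like this:

name: "LeNet"
input: "data"
input_dim: 1    # batch size
input_dim: 1    # channels: 1, because MNIST images are grayscale
input_dim: 28   # height
input_dim: 28   # width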
Prepare the mean file
The pycaffe test interface caffe.Classifier expects a mean file for the training images as one of its input parameters, but the actual LeNet-5 training never computes a mean file, so here we create an all-zero mean with a small helper script, zeronp.py.
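A minimal sketch of such a script; the shape is an assumption (one channel, 28x28, stored as channels x height x width so that the .mean(1).mean(1) call in classifymnist.py works), and the file should end up where --mean_file expects it, e.g. under examples/mnist/:

# zeronp.py -- write an all-zero mean file for a single-channel 28x28 input.
import numpy as np

zero_mean = np.zeros((1, 28, 28), dtype=np.float32)  # channels x height x width
np.save('meanfile.npy', zero_mean)
print('wrote meanfile.npy with shape %s' % str(zero_mean.shape))

Run it with: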
python zeronp.py

This produces the mean file meanfile.npy. Make sure its dimensions match those of the test images. See: https://github.com/BVLC/caffe/issues/320
Modify classify.py and save it as classifymnist.py
#!/usr/bin/env python
"""
classify.py is an out-of-the-box image classifer callable from the command line.

By default it configures and runs the Caffe reference ImageNet model.
"""
import numpy as np
import os
import sys
import argparse
import glob
import time
import pandas as pd  # used to format the label/score output

import caffe

def main(argv):
    pycaffe_dir = os.path.dirname(__file__)

    parser = argparse.ArgumentParser()
    # Required arguments: input and output files.
    parser.add_argument(
        "input_file",
        help="Input image, directory, or npy."
    )
    parser.add_argument(
        "output_file",
        help="Output npy filename."
    )
    # Optional arguments.
    parser.add_argument(
        "--model_def",
        default=os.path.join(pycaffe_dir,
                "../examples/mnist/deploy.prototxt"), #  lenet-5 deploy.prototxt    
        help="Model definition file."
    )
    parser.add_argument(
        "--pretrained_model",
        default=os.path.join(pycaffe_dir,
                "../examples/mnist/lenet_iter_10000.caffemodel"), #  lenet-5 caffemodel    
        help="Trained model weights file."
    )
####### ^^^^^^^ the two defaults above were changed to the LeNet-5 files ^^^^^^^
    parser.add_argument(
        "--labels_file",
        default=os.path.join(pycaffe_dir,
                "../examples/mnist/synset_words.txt"), #              
        help="mnist result words file"
    )
    parser.add_argument(
        "--force_grayscale",
        action='store_true',   # convert the input to single-channel grayscale, as LeNet-5 expects
        help="Converts RGB images down to single-channel grayscale versions," +
                   "useful for single-channel networks like MNIST."
    )
    parser.add_argument(
        "--print_results",
        action='store_true', # print the classification result to stdout
        help="Write output text to stdout rather than serializing to a file."
    )
####### vvvvvvv the remaining arguments follow the original classify.py vvvvvvv
    parser.add_argument(
        "--gpu",
        action='store_true',
        help="Switch for gpu computation."
    )
    parser.add_argument(
        "--center_only",
        action='store_true',
        help="Switch for prediction from center crop alone instead of " +
             "averaging predictions across crops (default)."
    )
    parser.add_argument(
        "--images_dim",
        default='28,28',     # MNIST input images are 28x28
        help="Canonical 'height,width' dimensions of input images."
    )
    parser.add_argument(
        "--mean_file",
        default=os.path.join(pycaffe_dir,
                             '../examples/mnist/meanfile.npy'), # the all-zero mean file generated above
        help="Data set image mean of [Channels x Height x Width] dimensions " +
             "(numpy array). Set to '' for no mean subtraction."
    )
    parser.add_argument(
        "--input_scale",
        type=float,
        help="Multiply input features by this scale to finish preprocessing."
    )
    parser.add_argument(
        "--raw_scale",
        type=float,
        default=255.0,
        help="Multiply raw input by this scale before preprocessing."
    )
    parser.add_argument(
        "--channel_swap",
        default='2,1,0',
        help="Order to permute input channels. The default converts " +
             "RGB -> BGR since BGR is the Caffe default by way of OpenCV."
    )
    parser.add_argument(
        "--ext",
        default='jpg',
        help="Image file extension to take as input when a directory " +
             "is given as the input file."
    )
    args = parser.parse_args()

    image_dims = [int(s) for s in args.images_dim.split(',')]

    mean, channel_swap = None, None
    if not args.force_grayscale:
        if args.mean_file:
            mean = np.load(args.mean_file).mean(1).mean(1)
        if args.channel_swap:
            channel_swap = [int(s) for s in args.channel_swap.split(',')]

    if args.gpu:
        caffe.set_mode_gpu()
        print("GPU mode")
    else:
        caffe.set_mode_cpu()
        print("CPU mode")

    # Make classifier.
    classifier = caffe.Classifier(args.model_def, args.pretrained_model,
            image_dims=image_dims, mean=mean,
            input_scale=args.input_scale, raw_scale=args.raw_scale,
            channel_swap=channel_swap)

    # Load numpy array (.npy), directory glob (*.jpg), or image file.
    args.input_file = os.path.expanduser(args.input_file)
    if args.input_file.endswith('npy'):
        print("Loading file: %s" % args.input_file)
        inputs = np.load(args.input_file)
    elif os.path.isdir(args.input_file):
        print("Loading folder: %s" % args.input_file)
        inputs = [caffe.io.load_image(im_f)
                 for im_f in glob.glob(args.input_file + '/*.' + args.ext)]
    else:
        print("Loading image file: %s" % args.input_file)
        inputs = [caffe.io.load_image(args.input_file, not args.force_grayscale)] # load one image; read as grayscale when --force_grayscale is given

    print("Classifying %d inputs." % len(inputs))

    # Classify.
    start = time.time()
    scores = classifier.predict(inputs, not args.center_only).flatten()
    print("Done in %.2f s." % (time.time() - start))

    # Print the top predictions.
    if args.print_results:
        with open(args.labels_file) as f:
            labels_df = pd.DataFrame([{'synset_id': l.strip().split(' ')[0], 'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]} for l in f.readlines()])
            labels = labels_df.sort_values('synset_id')['name'].values  # DataFrame.sort() is gone in newer pandas

            # Indices of the five highest scores, best first.
            indices = (-scores).argsort()[:5]
            predictions = labels[indices]
            print(predictions)
            print(scores)

            meta = [(p, '%.5f' % scores[i]) for i, p in zip(indices, predictions)]
            print(meta)


    # Save
    print("Saving results into %s" % args.output_file)
    np.save(args.output_file, predictions if args.print_results else scores)  # predictions only exists with --print_results


if __name__ == '__main__':
    main(sys.argv)
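
Before running it, also prepare the labels file. The script reads class names from --labels_file (synset_words.txt above), and MNIST does not ship with one, so create a small text file whose lines match the parser's "<synset_id> <name>" format; the exact names are up to you, for example:

0 zero
1 one
2 two
3 three
4 four
5 five
6 six
7 seven
8 eight
9 nine

Save it as ../examples/mnist/synset_words.txt so that both the default in the script and the command below find it.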

Once the changes are in place, run the following command:
 python classifymnist.py --print_results --force_grayscale --center_only --labels_file ../examples/mnist/synset_words.txt ../examples/mnist/4.jpg resultsfile

The result is as follows (screenshot of the classification output for 4.jpg).
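
For reference, the core of what classifymnist.py does can be reproduced with a few pycaffe calls. This is only a sketch, not a replacement for the script: the paths mirror the defaults above, and loading with color=False plays the role of --force_grayscale.

# Sketch: classify a single MNIST image directly with pycaffe (CPU mode).
import caffe

caffe.set_mode_cpu()
net = caffe.Classifier('../examples/mnist/deploy.prototxt',
                       '../examples/mnist/lenet_iter_10000.caffemodel',
                       image_dims=(28, 28),
                       raw_scale=255.0)  # same default as classifymnist.py

img = caffe.io.load_image('../examples/mnist/4.jpg', color=False)  # single-channel image
scores = net.predict([img], oversample=False).flatten()
print('predicted digit: %d' % scores.argmax())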
References:
http://www.cnblogs.com/denny402/p/5111018.html
https://github.com/BVLC/caffe/pull/2359/commits/dd3a5f9268ca3bdf19a17760bd6f568e21c1b521
http://blog.csdn.net/gzljss/article/details/45849013
http://blog.csdn.net/cqcyst/article/details/47364429
http://caffe.berkeleyvision.org/gathered/examples/feature_extraction.html