# -*- coding: utf-8 -*-

import argparse

from supar import VISemanticDependencyParser
from supar.cmds.cmd import parse


def main():
    parser = argparse.ArgumentParser(description='Create Semantic Dependency Parser using Variational Inference.')
    parser.set_defaults(Parser=VISemanticDependencyParser)
    subparsers = parser.add_subparsers(title='Commands', dest='mode')
    # train
    subparser = subparsers.add_parser('train', help='Train a parser.')
    subparser.add_argument('--feat', '-f', default='tag,char,lemma', help='additional features to use, separated by commas')
    subparser.add_argument('--build', '-b', action='store_true', help='whether to build the model first')
    subparser.add_argument('--max-len', type=int, help='max length of the sentences')
    subparser.add_argument('--buckets', default=32, type=int, help='max num of buckets to use')
    subparser.add_argument('--train', default='data/sdp/DM/train.conllu', help='path to train file')
    subparser.add_argument('--dev', default='data/sdp/DM/dev.conllu', help='path to dev file')
    subparser.add_argument('--test', default='data/sdp/DM/test.conllu', help='path to test file')
    subparser.add_argument('--embed', default='data/glove.6B.100d.txt', help='path to pretrained embeddings')
    subparser.add_argument('--unk', default='unk', help='unk token in pretrained embeddings')
    subparser.add_argument('--n-embed', default=100, type=int, help='dimension of embeddings')
    subparser.add_argument('--bert', default='bert-base-cased', help='which bert model to use')
    subparser.add_argument('--inference', default='mfvi', choices=['mfvi', 'lbp'], help='approximate inference methods')
    # evaluate
    subparser = subparsers.add_parser('evaluate', help='Evaluate the specified parser and dataset.')
    subparser.add_argument('--buckets', default=8, type=int, help='max num of buckets to use')
    subparser.add_argument('--data', default='data/sdp/DM/test.conllu', help='path to dataset')
    # predict
    subparser = subparsers.add_parser('predict', help='Use a trained parser to make predictions.')
    subparser.add_argument('--prob', action='store_true', help='whether to output probs')
    subparser.add_argument('--buckets', default=8, type=int, help='max num of buckets to use')
    subparser.add_argument('--data', default='data/sdp/DM/test.conllu', help='path to dataset')
    subparser.add_argument('--pred', default='pred.conllu', help='path to predicted result')
    parse(parser)


if __name__ == "__main__":
    main()
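

# Example invocations (a sketch, not part of the original script).  The
# subcommand flags used below are the ones defined above; the shared options
# -p/--path and -d/--device are assumed to be registered by the parse()
# helper imported from supar.cmds.cmd and may differ across supar versions.
# `vi_sdp.py` stands in for whatever name this script is saved under.
#
#   # train on the DM dataset with tag, char and lemma features and MFVI inference
#   python vi_sdp.py train -b -d 0 -p exp/dm/model \
#       --train data/sdp/DM/train.conllu --dev data/sdp/DM/dev.conllu \
#       --test data/sdp/DM/test.conllu --inference mfvi
#
#   # evaluate the saved model on the test split
#   python vi_sdp.py evaluate -d 0 -p exp/dm/model --data data/sdp/DM/test.conllu
#
#   # annotate new data, writing predictions (and, with --prob, marginal probabilities)
#   python vi_sdp.py predict --prob -d 0 -p exp/dm/model \
#       --data data/sdp/DM/test.conllu --pred pred.conllu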