Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add bshtml and bibtex parser #343

Merged
merged 3 commits into from
Jan 3, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
76 changes: 76 additions & 0 deletions community/document-parsers/document-parser-bibtex/pom.xml
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Maven module descriptor for the BibTeX document parser of Spring AI Alibaba. -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<!-- Inherits version management from the spring-ai-alibaba root POM three levels up. -->
<parent>
<groupId>com.alibaba.cloud.ai</groupId>
<artifactId>spring-ai-alibaba</artifactId>
<version>${revision}</version>
<relativePath>../../../pom.xml</relativePath>
</parent>

<artifactId>document-parser-bibtex</artifactId>
<name>document-parser-bibtex</name>
<description>document-parser-bibtex for Spring AI Alibaba</description>
<packaging>jar</packaging>
<url>https://github.com/alibaba/spring-ai-alibaba</url>
<scm>
<url>https://github.com/alibaba/spring-ai-alibaba</url>
<connection>git://github.com/alibaba/spring-ai-alibaba.git</connection>
<developerConnection>[email protected]:alibaba/spring-ai-alibaba.git</developerConnection>
</scm>

<properties>
<!-- Java 17 toolchain, matching the rest of the project. -->
<maven.compiler.source>17</maven.compiler.source>
<maven.compiler.target>17</maven.compiler.target>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<!-- jbibtex supplies the BibTeX parsing engine used by BibtexDocumentParser. -->
<jbibtex.version>1.0.20</jbibtex.version>
</properties>

<dependencies>
<dependency>
<groupId>com.alibaba.cloud.ai</groupId>
<artifactId>spring-ai-alibaba-core</artifactId>
<version>${project.parent.version}</version>
</dependency>

<!-- BibTeX (.bib) parsing. -->
<dependency>
<groupId>org.jbibtex</groupId>
<artifactId>jbibtex</artifactId>
<version>${jbibtex.version}</version>
</dependency>

<!-- Default delegate parser for PDF files referenced by BibTeX "file" fields. -->
<dependency>
<groupId>com.alibaba.cloud.ai</groupId>
<artifactId>document-parser-apache-pdfbox</artifactId>
<version>${project.parent.version}</version>
</dependency>

<!-- test dependencies -->
<dependency>
<groupId>org.springframework.ai</groupId>
<artifactId>spring-ai-test</artifactId>
<scope>test</scope>
</dependency>

<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>

<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-engine</artifactId>
<scope>test</scope>
</dependency>

<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
</dependencies>

</project>
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
/*
* Copyright 2024-2025 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.cloud.ai.parser.bibtex;

import com.alibaba.cloud.ai.document.DocumentParser;
import com.alibaba.cloud.ai.parser.apache.pdfbox.PagePdfDocumentParser;
import org.jbibtex.BibTeXDatabase;
import org.jbibtex.BibTeXEntry;
import org.jbibtex.BibTeXParser;
import org.jbibtex.Key;
import org.jbibtex.Value;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.ai.document.Document;
import org.springframework.ai.reader.ExtractedTextFormatter;
import org.springframework.ai.reader.pdf.config.PdfDocumentReaderConfig;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**
* @author HeYQ
* @since 2025-01-02 23:15
*/

public class BibtexDocumentParser implements DocumentParser {

private final Logger logger = LoggerFactory.getLogger(getClass());

// US-ASCII UTF-8
private final String charsetName;

private final Integer maxContentChars;

private final Integer maxDocs;

private final Pattern filePattern;

private final DocumentParser parser;

public BibtexDocumentParser() {
this("UTF-8", null, null, null,
new PagePdfDocumentParser(PdfDocumentReaderConfig.builder()
.withPageTopMargin(0)
.withPageBottomMargin(0)
.withPageExtractedTextFormatter(ExtractedTextFormatter.builder()
.withNumberOfTopTextLinesToDelete(0)
.withNumberOfBottomTextLinesToDelete(3)
.withNumberOfTopPagesToSkipBeforeDelete(0)
.build())
.withPagesPerDocument(1)
.build()));
}

public BibtexDocumentParser(String charsetName, Integer maxContentChars, Integer maxDocs, Pattern filePattern,
DocumentParser parser) {
this.charsetName = charsetName;
this.maxContentChars = maxContentChars;
this.maxDocs = maxDocs;
this.filePattern = filePattern;
this.parser = parser;
}

@Override
public List<Document> parse(InputStream inputStream) {
try (Reader reader = new InputStreamReader(inputStream, charsetName)) {
List<Document> documentList = new ArrayList<>(10);
BibTeXParser bibtexParser = new BibTeXParser();
BibTeXDatabase database = bibtexParser.parse(reader);
Map<Key, BibTeXEntry> entries = database.getEntries();
if (entries.isEmpty()) {
return documentList;
}
if (maxDocs != null && maxDocs > 0 && entries.size() > maxDocs) {
entries = entries.entrySet()
.stream()
.limit(maxDocs)
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
(existing, replacement) -> existing));
}
for (BibTeXEntry entry : entries.values()) {
Map<String, Object> metadata = new HashMap<>();
metadata.put(entry.getType().getValue(), entry.getKey());
for (Key key : entry.getFields().keySet()) {
Value value = entry.getFields().get(key);
metadata.put(key.getValue(), value.toUserString());
}
List<String> fileNames = new ArrayList<>();
if (metadata.containsKey("file")) {
String fileValue = metadata.get("file").toString();
if (!Objects.isNull(filePattern)) {
Matcher matcher = filePattern.matcher(metadata.get("file").toString());
while (matcher.find()) {
fileNames.add(matcher.group());
}
}
else {
Collections.addAll(fileNames, fileValue.split("[;,\\s]+"));
}
}
StringBuilder content = new StringBuilder(metadata.getOrDefault("abstract", "").toString());
if (!fileNames.isEmpty()) {
for (String fileName : fileNames) {
try (InputStream fileInputStream = new DefaultResourceLoader()
.getResource("classpath:/" + fileName)
.getInputStream()) {
List<Document> docs = parser.parse(fileInputStream);
if (!docs.isEmpty()) {
content.append(docs.get(0).getText());
}
}
catch (IOException e) {
// Log the exception and continue with the next file
logger.warn("Failed to read file: " + fileName, e);
}

}
}

if (maxContentChars != null && maxContentChars > 0) {
int endIndex = Math.min(maxContentChars, content.length());
content = new StringBuilder(content.substring(0, endIndex));
}

Document document = new Document(content.toString(), metadata);
documentList.add(document);
}

return documentList;
}
catch (Exception e) {
logger.error("Error parsing input stream", e);
throw new RuntimeException("Error parsing input stream", e);
}

}

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
/*
* Copyright 2024-2025 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.cloud.ai.parser.bibtex;

import com.alibaba.cloud.ai.document.DocumentParser;
import com.alibaba.cloud.ai.parser.apache.pdfbox.PagePdfDocumentParser;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import org.springframework.ai.document.Document;
import org.springframework.ai.reader.ExtractedTextFormatter;
import org.springframework.ai.reader.pdf.config.PdfDocumentReaderConfig;

import java.io.InputStream;

/**
 * Smoke test: parses the {@code wiley.bib} classpath fixture with
 * {@link BibtexDocumentParser} and prints each resulting document's text.
 *
 * NOTE(review): the class name looks like a typo for "BibtexDocumentParserTest" and the
 * method name says "xml" although the fixture is BibTeX; both names are kept unchanged
 * here to avoid breaking external references — consider renaming in a follow-up.
 *
 * @author HeYQ
 * @since 2025-01-02 23:15
 */

public class BibtexDocumentParserText {

	@ParameterizedTest
	@ValueSource(strings = { "wiley.bib" })
	void should_parse_xml_file(String fileName) {
		// Cap content at 10 chars and entries at 2 to keep the smoke test small; the
		// PDF delegate handles files referenced by BibTeX "file" fields.
		DocumentParser parser = new BibtexDocumentParser("UTF-8", 10, 2, null,
				new PagePdfDocumentParser(PdfDocumentReaderConfig.builder()
					.withPageTopMargin(0)
					.withPageBottomMargin(0)
					.withPageExtractedTextFormatter(ExtractedTextFormatter.builder()
						.withNumberOfTopTextLinesToDelete(0)
						.withNumberOfBottomTextLinesToDelete(3)
						.withNumberOfTopPagesToSkipBeforeDelete(0)
						.build())
					.withPagesPerDocument(1)
					.build()));
		// FIX: getResourceAsStream returns null for a missing fixture, which previously
		// surfaced as an opaque NPE inside the parser — fail fast with a clear message.
		// Also removed a stray ';' statement and commented-out code from the original.
		InputStream inputStream = java.util.Objects.requireNonNull(
				getClass().getClassLoader().getResourceAsStream(fileName),
				"test resource not found: " + fileName);
		for (Document document : parser.parse(inputStream)) {
			System.out.println(document.getText());
		}
	}

}
Binary file not shown.
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
@article{https://doi.org/10.1002/(SICI)1521-4133(199812)100:12<524::AID-LIPI524>3.0.CO;2-6,
author = {Mang, Theo},
title = {Umweltrelevante Kriterien zur Anwendung von Pflanzenölen und deren Derivaten im Schmierstoffbereich},
journal = {Lipid / Fett},
volume = {100},
number = {12},
pages = {524-527},
doi = {https://doi.org/10.1002/(SICI)1521-4133(199812)100:12<524::AID-LIPI524>3.0.CO;2-6},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/%28SICI%291521-4133%28199812%29100%3A12%3C524%3A%3AAID-LIPI524%3E3.0.CO%3B2-6},
eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1002/%28SICI%291521-4133%28199812%29100%3A12%3C524%3A%3AAID-LIPI524%3E3.0.CO%3B2-6},
abstract = {Abstract Der Einsatz nachwachsender Rohstoffe und deren Derivate in Schmierstoffen wird durch ihre besondere Umweltverträglichkeit motiviert, wobei die Substitution von Mineralöl durch biologisch abbaubare Grundöle im Vordergrund steht. Inzwischen werden für nahezu alle Schmierstoffanwendungen umweltfreundliche, biologisch abbaubare Alternativen zu den herkömmlichen Mineralölprodukten angeboten. 1997 wurden in Deutschland ca. 40000 t biologisch schnell abbaubare Schmierstoffe abgesetzt, also etwa 4,5\% der gesamten Schmierstoffmenge. Die weitere Steigerung dieses Anteils ist Ziel verschiedener Maßnahmen von Regierungen und Behörden. Allgemein ist anerkannt, daß potentiell mehr als 90\% aller Schmierstoffe auf Basis nachwachsender Rohstoffe dargestellt werden können.},
year = {1998}
}
@inproceedings{shen2021layoutparser,
title = {LayoutParser: A unified toolkit for deep learning based document image analysis},
author = {Shen, Zejiang and Zhang, Ruochen and Dell, Melissa and Lee, Benjamin Charles Germain and Carlson, Jacob and Li, Weining},
booktitle = {Document Analysis and Recognition--ICDAR 2021: 16th International Conference, Lausanne, Switzerland, September 5--10, 2021, Proceedings, Part I 16},
pages = {131--146},
year = {2021},
organization = {Springer},
editor = {Llad{\'o}s, Josep
and Lopresti, Daniel
and Uchida, Seiichi},
file = {layout-parser-paper.pdf},
abstract = {Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.},
isbn = {978-3-030-86549-8},
}
@inproceedings{10.1145/3652037.3663916,
author = {Maganaris, Constantine and Protopapadakis, Eftychios and Doulamis, Nikolaos},
title = {Outlier detection in maritime environments using AIS data and deep recurrent architectures},
year = {2024},
isbn = {9798400717604},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3652037.3663916},
doi = {10.1145/3652037.3663916},
booktitle = {Proceedings of the 17th International Conference on PErvasive Technologies Related to Assistive Environments},
pages = {420–427},
numpages = {8},
keywords = {AI, AIS, GRU, RNN, datasets, deep learning, maritime, neural networks, outlier detection, segmentation},
location = {Crete, Greece},
series = {PETRA '24}
}
@INBOOK{inbook-full,
author = "Knuth | Donald E. ",
title = "Fundamental Algorithms",
volume = "1",
series = "The Art of Computer Programming",
publisher = "Addison-Wesley",
address = "Reading Massachusetts",
edition = "Second",
month = "10 jan",
year = "{\noopsort{1973b}}1973",
type = "Section",
chapter = "1.2",
pages = "10 119",
note = "This is a full INBOOK entry",
}

Loading
Loading