Package: org.wikipediacleaner.api.data

Examples of org.wikipediacleaner.api.data.Page


        }
      }
    }
    // Sort so that pages with the same title become adjacent,
    // then remove duplicates in a single linear pass.
    Collections.sort(resultList);
    Iterator<Page> itPage = resultList.iterator();
    Page previousPage = null;
    while (itPage.hasNext()) {
      Page page = itPage.next();
      if ((previousPage != null) &&
          (Page.areSameTitle(previousPage.getTitle(), page.getTitle()))) {
        // Same title as the previous entry: drop it via the iterator
        // (the only safe way to remove while iterating).
        itPage.remove();
      } else {
        // First occurrence of this title; remember it for the next comparison.
        previousPage = page;
      }
    }
View Full Code Here


      // Build one Page per <template> XML element, copying its
      // pageid / ns / title attributes onto the Page object.
      while (itTemplate.hasNext()) {
        Element currentTemplate = (Element) itTemplate.next();
        String pageId = currentTemplate.getAttributeValue("pageid");
        String ns = currentTemplate.getAttributeValue("ns");
        String title = currentTemplate.getAttributeValue("title");
        Page template = DataManager.getPage(
            getWiki(), title, null, null, null);
        template.setNamespace(ns);
        template.setPageId(pageId);
        // A "missing" attribute in the API answer means the page
        // does not exist on the wiki.
        if (currentTemplate.getAttribute("missing") != null) {
          template.setExisting(Boolean.FALSE);
        }
        // Avoid adding the same template twice.
        if (!list.contains(template)) {
          list.add(template);
        }
      }
View Full Code Here

        // Title of the current XML result node.
        String title = xpaTitle.valueOf(currentNode);
        for (Page p : pages) {
          // Collect the page and its redirects walked so far, so the whole
          // chain can be flagged together once a match is found.
          tmpPages.clear();
          Iterator<Page> it = p.getRedirectIteratorWithPage();
          while (it.hasNext()) {
            Page p2 = it.next();
            tmpPages.add(p2);
            if ((p2.getTitle() != null) &&
                (Page.areSameTitle(p2.getTitle(), title))) {
              // The node matches this page (or one of its redirects):
              // if it carries template matches, mark the chain
              // as disambiguation pages.
              List listTemplates = xpaTemplate.selectNodes(currentNode);
              if (listTemplates.size() > 0) {
                for (Page p3 : tmpPages) {
                  p3.setDisambiguationPage(Boolean.TRUE);
                }
View Full Code Here

            pageId = Integer.valueOf(tmp);
          }
        } catch (NumberFormatException e) {
          // Ignored on purpose: a non-numeric page id attribute simply
          // leaves pageId unset.
        }
        // Build a Page from the node's title / ns attributes and collect it.
        Page page = DataManager.getPage(
            getWiki(), currentNode.getAttributeValue("title"),
            pageId, null, null);
        page.setNamespace(currentNode.getAttributeValue("ns"));
        list.add(page);
      }

      // Retrieve continue
      return shouldContinue(
View Full Code Here

      // XPath selectors for the attributes of each result element.
      XPath xpaPageId = XPath.newInstance("./@pageid");
      XPath xpaNs = XPath.newInstance("./@ns");
      XPath xpaTitle = XPath.newInstance("./@title");
      while (iter.hasNext()) {
        Element currentNode = (Element) iter.next();
        Page page = DataManager.getPage(
            getWiki(), xpaTitle.valueOf(currentNode), null, null, null);
        page.setNamespace(xpaNs.valueOf(currentNode));
        page.setPageId(xpaPageId.valueOf(currentNode));
        if ((page.getNamespace() != null) &&
            (page.getNamespace().intValue() == Namespace.CATEGORY)) {
          // Sub-categories are queued for traversal one level deeper,
          // instead of being added to the result list.
          categories.put(page, depth + 1);
        } else {
          // Regular members go to the result list, without duplicates.
          if (!list.contains(page)) {
            list.add(page);
          }
View Full Code Here

      // NOTE(review): this query uses "./@id" for the page id, unlike the
      // "./@pageid" used elsewhere — presumably matching this API module's
      // answer format; confirm against the MediaWiki random-list output.
      XPath xpaPageId = XPath.newInstance("./@id");
      XPath xpaNs = XPath.newInstance("./@ns");
      XPath xpaTitle = XPath.newInstance("./@title");
      while (iter.hasNext()) {
        Element currentNode = (Element) iter.next();
        Page page = DataManager.getPage(
            getWiki(), xpaTitle.valueOf(currentNode), null, null, null);
        page.setNamespace(xpaNs.valueOf(currentNode));
        page.setPageId(xpaPageId.valueOf(currentNode));
        list.add(page);
      }
    } catch (JDOMException e) {
      log.error("Error loading random list", e);
      throw new APIException("Error parsing XML", e);
View Full Code Here

    if (image != null) {
      try {
        API api = APIFactory.getAPI();

        // Retrieve image descriptions
        Page imagePage = DataManager.getPage(
            image.getWiki(),
            image.getNamespace() + ":" + image.getImage(),
            null, null, null);
        api.retrieveContents(
            image.getWiki(),
            Collections.singletonList(imagePage), false, false);

        // Use image description on the wiki:
        // look for an {{Information}} template and keep its
        // non-blank "Description" parameter.
        if (Boolean.TRUE.equals(imagePage.isExisting())) {
          PageAnalysis pageAnalysis = imagePage.getAnalysis(imagePage.getContents(), true);
          for (PageElementTemplate template : pageAnalysis.getTemplates()) {
            if (Page.areSameTitle("Information", template.getTemplateName())) {
              String description = template.getParameterValue("Description");
              if ((description != null) && (description.trim().length() > 0)) {
                result.add(description.trim());
              }
            }
          }
        }

        // Retrieve image description on Commons, as a second source.
        Page commonsPage = DataManager.getPage(
            EnumWikipedia.COMMONS,
            "File:" + image.getImage(),
            null, null, null);
        api.retrieveContents(
            EnumWikipedia.COMMONS,
            Collections.singletonList(commonsPage), false, false);
        if (Boolean.TRUE.equals(commonsPage.isExisting())) {
          PageAnalysis pageAnalysis = commonsPage.getAnalysis(commonsPage.getContents(), true);
          for (PageElementTemplate template : pageAnalysis.getTemplates()) {
            if (Page.areSameTitle("Information", template.getTemplateName())) {
              String global = template.getParameterValue("Description");
              if ((global != null) && (global.trim().length() > 0)) {
                // On Commons, descriptions are wrapped in per-language
                // templates (named after the wiki's language code);
                // re-analyze the Description value to find the one matching
                // this image's wiki and keep its first parameter.
                PageAnalysis descAnalysis = commonsPage.getAnalysis(global, true);
                for (PageElementTemplate template2 : descAnalysis.getTemplates()) {
                  if (Page.areSameTitle(image.getWiki().getSettings().getCode(), template2.getTemplateName())) {
                    String description = template2.getParameterValue("1");
                    if ((description != null) && (description.trim().length() > 0)) {
                      result.add(description.trim());
View Full Code Here

      List results = xpa.selectNodes(root);
      Iterator iter = results.iterator();
      while (iter.hasNext()) {
        Element currentNode = (Element) iter.next();
        // Keep only entries whose expiry is "infinity",
        // i.e. pages protected indefinitely.
        if ("infinity".equals(currentNode.getAttributeValue("expiry"))) {
          Page page = DataManager.getPage(
              getWiki(), currentNode.getAttributeValue("title"), null, null, null);
          page.setNamespace(currentNode.getAttributeValue("ns"));
          list.add(page);
        }
      }

      // Retrieve continue
View Full Code Here

        }
      }
      // Any page (or redirect) whose disambiguation status is still
      // undetermined after the analysis above is not a disambiguation page.
      for (Page page : splitPages) {
        Iterator<Page> itPage = page.getRedirectIteratorWithPage();
        while (itPage.hasNext()) {
          Page tmpPage = itPage.next();
          if (tmpPage.isDisambiguationPage() == null) {
            tmpPage.setDisambiguationPage(Boolean.FALSE);
          }
        }
      }
    }
  }
View Full Code Here

        // Intentionally ignored.
      }
      // Any page (or redirect) whose disambiguation status is still
      // undetermined after the analysis above is not a disambiguation page.
      for (Page page : splitPages) {
        Iterator<Page> itPage = page.getRedirectIteratorWithPage();
        while (itPage.hasNext()) {
          Page tmpPage = itPage.next();
          if (tmpPage.isDisambiguationPage() == null) {
            tmpPage.setDisambiguationPage(Boolean.FALSE);
          }
        }
      }
    }
  }
View Full Code Here

TOP

Related Classes of org.wikipediacleaner.api.data.Page

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact coftware#gmail.com.