author    Karl Berry <karl@freefriends.org>    2016-04-05 22:27:26 +0000
committer Karl Berry <karl@freefriends.org>    2016-04-05 22:27:26 +0000
commit    b56b320b5e2515160073fa1b469514002688fe11 (patch)
tree      965a7100c5e45fca8ec803d22b8b6ce14fca4633 /Master/tlpkg/tlperl/site/lib/WWW
parent    d26c206452d2e285c3bbf949f34011e4a55fd8f9 (diff)
tlperl 5.22.1 from siep
git-svn-id: svn://tug.org/texlive/trunk@40252 c570f23f-e606-0410-a88d-b1316a301751
Diffstat (limited to 'Master/tlpkg/tlperl/site/lib/WWW')
-rw-r--r--  Master/tlpkg/tlperl/site/lib/WWW/RobotRules.pm              453
-rw-r--r--  Master/tlpkg/tlperl/site/lib/WWW/RobotRules/AnyDBM_File.pm  170
2 files changed, 623 insertions, 0 deletions
diff --git a/Master/tlpkg/tlperl/site/lib/WWW/RobotRules.pm b/Master/tlpkg/tlperl/site/lib/WWW/RobotRules.pm
new file mode 100644
index 00000000000..5dfc4539349
--- /dev/null
+++ b/Master/tlpkg/tlperl/site/lib/WWW/RobotRules.pm
@@ -0,0 +1,453 @@
+package WWW::RobotRules;
+
+$VERSION = "6.02";
+sub Version { $VERSION; }
+
+use strict;
+use URI ();
+
+
+
+sub new {
+ my($class, $ua) = @_;
+
+ # This ugly hack is needed to ensure backwards compatibility.
+ # The "WWW::RobotRules" class is now really abstract.
+ $class = "WWW::RobotRules::InCore" if $class eq "WWW::RobotRules";
+
+ my $self = bless { }, $class;
+ $self->agent($ua);
+ $self;
+}
+
+
+sub parse {
+ my($self, $robot_txt_uri, $txt, $fresh_until) = @_;
+ $robot_txt_uri = URI->new("$robot_txt_uri");
+ my $netloc = $robot_txt_uri->host . ":" . $robot_txt_uri->port;
+
+ $self->clear_rules($netloc);
+ $self->fresh_until($netloc, $fresh_until || (time + 365*24*3600));
+
+ my $ua;
+ my $is_me = 0; # 1 iff this record is for me
+ my $is_anon = 0; # 1 iff this record is for *
+ my $seen_disallow = 0; # watch for missing record separators
+ my @me_disallowed = (); # rules disallowed for me
+ my @anon_disallowed = (); # rules disallowed for *
+
+ # blank lines are significant, so turn CRLF into LF to avoid generating
+ # false ones
+ $txt =~ s/\015\012/\012/g;
+
+ # split at \012 (LF) or \015 (CR) (Mac text files have just CR for EOL)
+ for(split(/[\012\015]/, $txt)) {
+
+ # Lines containing only a comment are discarded completely, and
+ # therefore do not indicate a record boundary.
+ next if /^\s*\#/;
+
+ s/\s*\#.*//; # remove comments at end-of-line
+
+ if (/^\s*$/) { # blank line
+ last if $is_me; # That was our record. No need to read the rest.
+ $is_anon = 0;
+ $seen_disallow = 0;
+ }
+ elsif (/^\s*User-Agent\s*:\s*(.*)/i) {
+ $ua = $1;
+ $ua =~ s/\s+$//;
+
+ if ($seen_disallow) {
+ # treat as start of a new record
+ $seen_disallow = 0;
+ last if $is_me; # That was our record. No need to read the rest.
+ $is_anon = 0;
+ }
+
+ if ($is_me) {
+ # This record already had a User-agent that
+ # we matched, so just continue.
+ }
+ elsif ($ua eq '*') {
+ $is_anon = 1;
+ }
+ elsif($self->is_me($ua)) {
+ $is_me = 1;
+ }
+ }
+ elsif (/^\s*Disallow\s*:\s*(.*)/i) {
+ unless (defined $ua) {
+ warn "RobotRules <$robot_txt_uri>: Disallow without preceding User-agent\n" if $^W;
+ $is_anon = 1; # assume that User-agent: * was intended
+ }
+ my $disallow = $1;
+ $disallow =~ s/\s+$//;
+ $seen_disallow = 1;
+ if (length $disallow) {
+ my $ignore;
+ eval {
+ my $u = URI->new_abs($disallow, $robot_txt_uri);
+ $ignore++ if $u->scheme ne $robot_txt_uri->scheme;
+ $ignore++ if lc($u->host) ne lc($robot_txt_uri->host);
+ $ignore++ if $u->port ne $robot_txt_uri->port;
+ $disallow = $u->path_query;
+ $disallow = "/" unless length $disallow;
+ };
+ next if $@;
+ next if $ignore;
+ }
+
+ if ($is_me) {
+ push(@me_disallowed, $disallow);
+ }
+ elsif ($is_anon) {
+ push(@anon_disallowed, $disallow);
+ }
+ }
+ elsif (/\S\s*:/) {
+ # ignore
+ }
+ else {
+ warn "RobotRules <$robot_txt_uri>: Malformed record: <$_>\n" if $^W;
+ }
+ }
+
+ if ($is_me) {
+ $self->push_rules($netloc, @me_disallowed);
+ }
+ else {
+ $self->push_rules($netloc, @anon_disallowed);
+ }
+}
+
+
+#
+# Returns TRUE if the given name matches the
+# name of this robot
+#
+sub is_me {
+ my($self, $ua_line) = @_;
+ my $me = $self->agent;
+
+    # See whether the name given on the "User-Agent: ..." line that
+    # we were passed is a substring of my short name:
+
+ if(index(lc($me), lc($ua_line)) >= 0) {
+ return 1;
+ }
+ else {
+ return '';
+ }
+}
+
+
+sub allowed {
+ my($self, $uri) = @_;
+ $uri = URI->new("$uri");
+
+ return 1 unless $uri->scheme eq 'http' or $uri->scheme eq 'https';
+ # Robots.txt applies to only those schemes.
+
+ my $netloc = $uri->host . ":" . $uri->port;
+
+ my $fresh_until = $self->fresh_until($netloc);
+ return -1 if !defined($fresh_until) || $fresh_until < time;
+
+ my $str = $uri->path_query;
+ my $rule;
+ for $rule ($self->rules($netloc)) {
+ return 1 unless length $rule;
+ return 0 if index($str, $rule) == 0;
+ }
+ return 1;
+}
+
+
+# The following methods must be provided by the subclass.
+sub agent;
+sub visit;
+sub no_visits;
+sub last_visit;
+sub fresh_until;
+sub push_rules;
+sub clear_rules;
+sub rules;
+sub dump;
+
+
+
+package WWW::RobotRules::InCore;
+
+use vars qw(@ISA);
+@ISA = qw(WWW::RobotRules);
+
+
+
+sub agent {
+ my ($self, $name) = @_;
+ my $old = $self->{'ua'};
+ if ($name) {
+ # Strip it so that it's just the short name.
+ # I.e., "FooBot" => "FooBot"
+ # "FooBot/1.2" => "FooBot"
+ # "FooBot/1.2 [http://foobot.int; foo@bot.int]" => "FooBot"
+
+ $name = $1 if $name =~ m/(\S+)/; # get first word
+ $name =~ s!/.*!!; # get rid of version
+ unless ($old && $old eq $name) {
+ delete $self->{'loc'}; # all old info is now stale
+ $self->{'ua'} = $name;
+ }
+ }
+ $old;
+}
+
+
+sub visit {
+ my($self, $netloc, $time) = @_;
+ return unless $netloc;
+ $time ||= time;
+ $self->{'loc'}{$netloc}{'last'} = $time;
+ my $count = \$self->{'loc'}{$netloc}{'count'};
+ if (!defined $$count) {
+ $$count = 1;
+ }
+ else {
+ $$count++;
+ }
+}
+
+
+sub no_visits {
+ my ($self, $netloc) = @_;
+ $self->{'loc'}{$netloc}{'count'};
+}
+
+
+sub last_visit {
+ my ($self, $netloc) = @_;
+ $self->{'loc'}{$netloc}{'last'};
+}
+
+
+sub fresh_until {
+ my ($self, $netloc, $fresh_until) = @_;
+ my $old = $self->{'loc'}{$netloc}{'fresh'};
+ if (defined $fresh_until) {
+ $self->{'loc'}{$netloc}{'fresh'} = $fresh_until;
+ }
+ $old;
+}
+
+
+sub push_rules {
+ my($self, $netloc, @rules) = @_;
+ push (@{$self->{'loc'}{$netloc}{'rules'}}, @rules);
+}
+
+
+sub clear_rules {
+ my($self, $netloc) = @_;
+ delete $self->{'loc'}{$netloc}{'rules'};
+}
+
+
+sub rules {
+ my($self, $netloc) = @_;
+ if (defined $self->{'loc'}{$netloc}{'rules'}) {
+ return @{$self->{'loc'}{$netloc}{'rules'}};
+ }
+ else {
+ return ();
+ }
+}
+
+
+sub dump
+{
+ my $self = shift;
+ for (keys %$self) {
+ next if $_ eq 'loc';
+ print "$_ = $self->{$_}\n";
+ }
+ for (keys %{$self->{'loc'}}) {
+ my @rules = $self->rules($_);
+ print "$_: ", join("; ", @rules), "\n";
+ }
+}
+
+
+1;
+
+__END__
+
+
+# Bender: "Well, I don't have anything else
+# planned for today. Let's get drunk!"
+
+=head1 NAME
+
+WWW::RobotRules - database of robots.txt-derived permissions
+
+=head1 SYNOPSIS
+
+ use WWW::RobotRules;
+ my $rules = WWW::RobotRules->new('MOMspider/1.0');
+
+ use LWP::Simple qw(get);
+
+ {
+ my $url = "http://some.place/robots.txt";
+ my $robots_txt = get $url;
+ $rules->parse($url, $robots_txt) if defined $robots_txt;
+ }
+
+ {
+ my $url = "http://some.other.place/robots.txt";
+ my $robots_txt = get $url;
+ $rules->parse($url, $robots_txt) if defined $robots_txt;
+ }
+
+ # Now we can check if a URL is valid for those servers
+ # whose "robots.txt" files we've gotten and parsed:
+ if($rules->allowed($url)) {
+ $c = get $url;
+ ...
+ }
+
+=head1 DESCRIPTION
+
+This module parses F</robots.txt> files as specified in
+"A Standard for Robot Exclusion", at
+<http://www.robotstxt.org/wc/norobots.html>.
+Webmasters can use the F</robots.txt> file to forbid conforming
+robots from accessing parts of their web site.
+
+The parsed files are kept in a WWW::RobotRules object, and this object
+provides methods to check if access to a given URL is prohibited. The
+same WWW::RobotRules object can be used for one or more parsed
+F</robots.txt> files on any number of hosts.
+
+The following methods are provided:
+
+=over 4
+
+=item $rules = WWW::RobotRules->new($robot_name)
+
+This is the constructor for WWW::RobotRules objects. The first
+argument given to new() is the name of the robot.
+
+=item $rules->parse($robot_txt_url, $content, $fresh_until)
+
+The parse() method takes as arguments the URL that was used to
+retrieve the F</robots.txt> file, and the contents of the file. An
+optional third argument, $fresh_until, gives the time (in seconds
+since the epoch) until which the parsed rules are considered fresh;
+if it is omitted, the rules are kept for one year.
+
+=item $rules->allowed($uri)
+
+Returns TRUE if this robot is allowed to retrieve this URL.
+
+=item $rules->agent([$name])
+
+Get/set the agent name. NOTE: Changing the agent name will clear the robots.txt
+rules and expire times out of the cache.
+
+=back
+
+=head1 ROBOTS.TXT
+
+The format and semantics of the "/robots.txt" file are as follows
+(this is an edited abstract of
+<http://www.robotstxt.org/wc/norobots.html>):
+
+The file consists of one or more records separated by one or more
+blank lines. Each record contains lines of the form
+
+ <field-name>: <value>
+
+The field name is case insensitive. Text after the '#' character on a
+line is ignored during parsing. This is used for comments. The
+following <field-names> can be used:
+
+=over 3
+
+=item User-Agent
+
+The value of this field is the name of the robot the record is
+describing access policy for. If more than one I<User-Agent> field is
+present the record describes an identical access policy for more than
+one robot. At least one field needs to be present per record. If the
+value is '*', the record describes the default access policy for any
+robot that has not matched any of the other records.
+
+The I<User-Agent> fields must occur before the I<Disallow> fields. If a
+record contains a I<User-Agent> field after a I<Disallow> field, that
+constitutes a malformed record. This parser will assume that a blank
+line should have been placed before that I<User-Agent> field, and will
+break the record into two. All the fields before the I<User-Agent> field
+will constitute a record, and the I<User-Agent> field will be the first
+field in a new record.
+
+=item Disallow
+
+The value of this field specifies a partial URL that is not to be
+visited. This can be a full path, or a partial path; any URL that
+starts with this value will not be retrieved.
+
+=back
+
+Unrecognized records are ignored.
+
+=head1 ROBOTS.TXT EXAMPLES
+
+The following example "/robots.txt" file specifies that no robots
+should visit any URL starting with "/cyberworld/map/" or "/tmp/":
+
+ User-agent: *
+ Disallow: /cyberworld/map/ # This is an infinite virtual URL space
+ Disallow: /tmp/ # these will soon disappear
+
+This example "/robots.txt" file specifies that no robots should visit
+any URL starting with "/cyberworld/map/", except the robot called
+"cybermapper":
+
+ User-agent: *
+ Disallow: /cyberworld/map/ # This is an infinite virtual URL space
+
+ # Cybermapper knows where to go.
+ User-agent: cybermapper
+ Disallow:
+
+This example indicates that no robots should visit this site further:
+
+ # go away
+ User-agent: *
+ Disallow: /
+
+This is an example of a malformed robots.txt file.
+
+ # robots.txt for ancientcastle.example.com
+ # I've locked myself away.
+ User-agent: *
+ Disallow: /
+ # The castle is your home now, so you can go anywhere you like.
+ User-agent: Belle
+ Disallow: /west-wing/ # except the west wing!
+ # It's good to be the Prince...
+ User-agent: Beast
+ Disallow:
+
+This file is missing the required blank lines between records.
+However, the intention is clear.
+
+=head1 SEE ALSO
+
+L<LWP::RobotUA>, L<WWW::RobotRules::AnyDBM_File>
+
+=head1 COPYRIGHT
+
+ Copyright 1995-2009, Gisle Aas
+ Copyright 1995, Martijn Koster
+
+This library is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself.
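
For reference, here is a minimal sketch of how the module added above is
typically driven: fetch robots.txt with LWP::UserAgent, hand it to parse(),
then test URLs with allowed(). The agent name, host, and one-day expiry are
illustrative assumptions, not values taken from this patch; note that
allowed() returns -1 once the cached rules have gone stale, as the source
above shows.

    #!/usr/bin/perl
    use strict;
    use warnings;

    use WWW::RobotRules;
    use LWP::UserAgent;

    # "ExampleBot" and www.example.com are placeholders.
    my $rules = WWW::RobotRules->new('ExampleBot/1.0');
    my $ua    = LWP::UserAgent->new(agent => 'ExampleBot/1.0');

    my $robots_url = 'http://www.example.com/robots.txt';
    my $response   = $ua->get($robots_url);

    # Keep the rules for one day; without the third argument the
    # module defaults to one year (see parse() above).
    $rules->parse($robots_url, $response->decoded_content, time + 24*3600)
        if $response->is_success;

    for my $url ('http://www.example.com/', 'http://www.example.com/tmp/x.html') {
        my $verdict = $rules->allowed($url);
        if    ($verdict == -1) { print "$url: rules expired, refetch robots.txt\n" }
        elsif ($verdict)       { print "$url: allowed\n" }
        else                   { print "$url: disallowed\n" }
    }
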
diff --git a/Master/tlpkg/tlperl/site/lib/WWW/RobotRules/AnyDBM_File.pm b/Master/tlpkg/tlperl/site/lib/WWW/RobotRules/AnyDBM_File.pm
new file mode 100644
index 00000000000..8daa68870f7
--- /dev/null
+++ b/Master/tlpkg/tlperl/site/lib/WWW/RobotRules/AnyDBM_File.pm
@@ -0,0 +1,170 @@
+package WWW::RobotRules::AnyDBM_File;
+
+require WWW::RobotRules;
+@ISA = qw(WWW::RobotRules);
+$VERSION = "6.00";
+
+use Carp ();
+use AnyDBM_File;
+use Fcntl;
+use strict;
+
+=head1 NAME
+
+WWW::RobotRules::AnyDBM_File - Persistent RobotRules
+
+=head1 SYNOPSIS
+
+ require WWW::RobotRules::AnyDBM_File;
+ require LWP::RobotUA;
+
+ # Create a robot user agent that uses a disk-caching RobotRules
+ my $rules = WWW::RobotRules::AnyDBM_File->new( 'my-robot/1.0', 'cachefile' );
+ my $ua = LWP::RobotUA->new( 'my-robot/1.0', 'me@foo.com', $rules );
+
+ # Then just use $ua as usual
+ $res = $ua->request($req);
+
+=head1 DESCRIPTION
+
+This is a subclass of I<WWW::RobotRules> that uses the AnyDBM_File
+package to implement persistent disk caching of F<robots.txt> and host
+visit information.
+
+The constructor (the new() method) takes an extra argument specifying
+the name of the DBM file to use. If the DBM file already exists, you
+can pass undef as the agent name; the name is then read back from the
+DBM database.
+
+=cut
+
+sub new
+{
+ my ($class, $ua, $file) = @_;
+ Carp::croak('WWW::RobotRules::AnyDBM_File filename required') unless $file;
+
+ my $self = bless { }, $class;
+ $self->{'filename'} = $file;
+ tie %{$self->{'dbm'}}, 'AnyDBM_File', $file, O_CREAT|O_RDWR, 0640
+ or Carp::croak("Can't open $file: $!");
+
+ if ($ua) {
+ $self->agent($ua);
+ }
+ else {
+ # Try to obtain name from DBM file
+ $ua = $self->{'dbm'}{"|ua-name|"};
+ Carp::croak("No agent name specified") unless $ua;
+ }
+
+ $self;
+}
+
+sub agent {
+ my($self, $newname) = @_;
+ my $old = $self->{'dbm'}{"|ua-name|"};
+ if (defined $newname) {
+	$newname =~ s!/?\s*\d+.\d+\s*$!!; # lose the version suffix
+ unless ($old && $old eq $newname) {
+ # Old info is now stale.
+ my $file = $self->{'filename'};
+ untie %{$self->{'dbm'}};
+ tie %{$self->{'dbm'}}, 'AnyDBM_File', $file, O_TRUNC|O_RDWR, 0640;
+ %{$self->{'dbm'}} = ();
+ $self->{'dbm'}{"|ua-name|"} = $newname;
+ }
+ }
+ $old;
+}
+
+sub no_visits {
+ my ($self, $netloc) = @_;
+ my $t = $self->{'dbm'}{"$netloc|vis"};
+ return 0 unless $t;
+ (split(/;\s*/, $t))[0];
+}
+
+sub last_visit {
+ my ($self, $netloc) = @_;
+ my $t = $self->{'dbm'}{"$netloc|vis"};
+ return undef unless $t;
+ (split(/;\s*/, $t))[1];
+}
+
+sub fresh_until {
+ my ($self, $netloc, $fresh) = @_;
+ my $old = $self->{'dbm'}{"$netloc|exp"};
+ if ($old) {
+ $old =~ s/;.*//; # remove cleartext
+ }
+ if (defined $fresh) {
+ $fresh .= "; " . localtime($fresh);
+ $self->{'dbm'}{"$netloc|exp"} = $fresh;
+ }
+ $old;
+}
+
+sub visit {
+ my($self, $netloc, $time) = @_;
+ $time ||= time;
+
+ my $count = 0;
+ my $old = $self->{'dbm'}{"$netloc|vis"};
+ if ($old) {
+ my $last;
+ ($count,$last) = split(/;\s*/, $old);
+ $time = $last if $last > $time;
+ }
+ $count++;
+ $self->{'dbm'}{"$netloc|vis"} = "$count; $time; " . localtime($time);
+}
+
+sub push_rules {
+ my($self, $netloc, @rules) = @_;
+ my $cnt = 1;
+ $cnt++ while $self->{'dbm'}{"$netloc|r$cnt"};
+
+ foreach (@rules) {
+ $self->{'dbm'}{"$netloc|r$cnt"} = $_;
+ $cnt++;
+ }
+}
+
+sub clear_rules {
+ my($self, $netloc) = @_;
+ my $cnt = 1;
+ while ($self->{'dbm'}{"$netloc|r$cnt"}) {
+ delete $self->{'dbm'}{"$netloc|r$cnt"};
+ $cnt++;
+ }
+}
+
+sub rules {
+ my($self, $netloc) = @_;
+ my @rules = ();
+ my $cnt = 1;
+ while (1) {
+ my $rule = $self->{'dbm'}{"$netloc|r$cnt"};
+ last unless $rule;
+ push(@rules, $rule);
+ $cnt++;
+ }
+ @rules;
+}
+
+sub dump
+{
+}
+
+1;
+
+=head1 SEE ALSO
+
+L<WWW::RobotRules>, L<LWP::RobotUA>
+
+=head1 AUTHORS
+
+Hakan Ardo E<lt>hakan@munin.ub2.lu.seE<gt>, Gisle Aas E<lt>aas@sn.noE<gt>
+
+=cut
+
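
As a closing illustration, a small sketch that plugs the persistent
WWW::RobotRules::AnyDBM_File cache added above into LWP::RobotUA, so that
robots.txt data and visit counts survive between runs. The cache file name,
agent name, contact address, and target URL are placeholders, not values
from this patch.

    #!/usr/bin/perl
    use strict;
    use warnings;

    use WWW::RobotRules::AnyDBM_File;
    use LWP::RobotUA;

    # 'robots-cache' and the addresses below are placeholders.
    my $rules = WWW::RobotRules::AnyDBM_File->new('ExampleBot/1.0', 'robots-cache');
    my $ua    = LWP::RobotUA->new('ExampleBot/1.0', 'webmaster@example.com', $rules);
    $ua->delay(1/60);    # wait at least one second between requests to a host

    my $response = $ua->get('http://www.example.com/');
    print $response->status_line, "\n";

    # On a later run the agent name may be passed as undef; new() then
    # reads it back from the DBM file, as the constructor above shows.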