Skip to content
Snippets Groups Projects
Commit e7f3ff25 authored by Dominik Hebeler's avatar Dominik Hebeler
Browse files

Merge branch '485-ux-bei-such-timeout' into 'development'

Großen Bug behoben, durch den viel zu viele Fetcher gestartet wurden.

Closes #485

See merge request !853
parents 8c2e2809 3ec44da4
No related branches found
No related tags found
1 merge request!1365Resolve "Filter Options for MetaGer"
...@@ -61,6 +61,8 @@ class Searcher implements ShouldQueue ...@@ -61,6 +61,8 @@ class Searcher implements ShouldQueue
// without killing this searcher directly. // without killing this searcher directly.
$mission = Redis::blpop($this->name . ".queue", 4); $mission = Redis::blpop($this->name . ".queue", 4);
$this->counter++; $this->counter++;
$this->updateStats(microtime(true) - $time);
$this->switchToRunning();
// The mission can be empty when blpop hit the timeout // The mission can be empty when blpop hit the timeout
if(empty($mission)){ if(empty($mission)){
continue; continue;
...@@ -83,28 +85,6 @@ class Searcher implements ShouldQueue ...@@ -83,28 +85,6 @@ class Searcher implements ShouldQueue
$this->storeResult($result, $poptime, $hashValue); $this->storeResult($result, $poptime, $hashValue);
/**
* When a Searcher is initially started the redis value for $this->name is set to "locked"
* which effectively will prevent new Searchers of this type to be started. (Value is checked by the MetaGer process which starts the Searchers)
* This is done so the MetaGer processes won't start hundreds of Searchers in parallel when under high work load.
* It will force that Searchers can only be started one after the other.
* When a new Searcher has served a minimum of three requests we have enough data to decide whether we need even more Searchers.
* To do so we will then set the redis value for $this->name to "running".
* There is a case where we don't want new Searchers to be started even if we would need to do so to serve every Request:
* When a search engine needs more time to produce search results than the timeout of the MetaGer process, we won't even bother of spawning
* more and more Searchers because they would just block free worker processes from serving the important engines which will give results in time.
**/
if($this->counter === 3 || $this->recheck){
# If the MetaGer process waits longer for the results than this Fetcher will probably need to fetch
# Or if this engine is in the array of important engines which we will always try to serve
if($timeout >= $medianFetchTime || in_array($this->name, $this->importantEngines)){
Redis::set($this->name, "running");
$this->recheck = false;
}else{
$this->recheck = true;
}
}
// Reset the time of the last Job so we can calculate // Reset the time of the last Job so we can calculate
// the time we have spend waiting for a new job // the time we have spend waiting for a new job
// We submit that calculation to the Redis system in the method // We submit that calculation to the Redis system in the method
...@@ -121,6 +101,30 @@ class Searcher implements ShouldQueue ...@@ -121,6 +101,30 @@ class Searcher implements ShouldQueue
$this->shutdown(); $this->shutdown();
} }
private function switchToRunning(){
/**
* Marks this Searcher type as "running" in Redis once enough requests have been served.
*
* When a Searcher is initially started, the Redis value for $this->name is set to "locked",
* which prevents new Searchers of this type from being started (the value is checked by the
* MetaGer process that spawns Searchers). This is done so the MetaGer processes won't start
* hundreds of Searchers in parallel under high load: Searchers can only be started one
* after the other. Once this Searcher has served three requests there is enough data to
* decide whether more Searchers are needed, so the Redis value is switched to "running".
*
* NOTE(review): the removed implementation only switched to "running" when
* $timeout >= $medianFetchTime or the engine was in $this->importantEngines, and used
* $this->recheck to re-evaluate that decision on later requests. This version switches
* unconditionally on exactly the third request (strict === on $this->counter) and
* $this->recheck is only ever cleared, never set — confirm that dropping the
* fetch-time guard is intended.
**/
if($this->counter === 3){
Redis::set($this->name, "running");
$this->recheck = false;
}
}
/**
* Pushes per-request statistics for this Searcher into Redis.
*
* Stores the curl connection info of the last request (base64-encoded JSON) together
* with the time spent waiting for the job, separated by ";", under this worker's pid
* in the "<name>.stats" hash (read back by getFetchTime() via hgetall).
*
* @param float $poptime seconds spent blocked (blpop) before the current job arrived
* @return void
*/
private function updateStats($poptime){
// Bugfix: json_encode()'s second parameter is an int bitmask of JSON_* flags, not an
// assoc toggle like json_decode() — the previous `true` silently became JSON_HEX_TAG (1).
$connectionInfo = base64_encode(json_encode(curl_getinfo($this->ch)));
Redis::hset($this->name . ".stats", $this->pid, $connectionInfo . ";" . $poptime);
}
private function getFetchTime(){ private function getFetchTime(){
$vals = Redis::hgetall($this->name . ".stats"); $vals = Redis::hgetall($this->name . ".stats");
if(sizeof($vals) === 0){ if(sizeof($vals) === 0){
...@@ -147,8 +151,6 @@ class Searcher implements ShouldQueue ...@@ -147,8 +151,6 @@ class Searcher implements ShouldQueue
/**
* Persists the fetched result for one search request.
*
* Writes $result into the "search.<hashValue>" hash under this engine's name and
* records the completion timestamp so the idle time before the next job can be
* measured (see the waiting-time calculation around the blpop loop).
*
* NOTE(review): $poptime is unused here — the stats bookkeeping moved to
* updateStats(); the parameter is kept so existing call sites stay valid.
*/
private function storeResult($result, $poptime, $hashValue){
$resultKey = 'search.' . $hashValue;
Redis::hset($resultKey, $this->name, $result);
// Remember when this job finished.
$this->lastTime = microtime(true);
}
...@@ -159,6 +161,7 @@ class Searcher implements ShouldQueue ...@@ -159,6 +161,7 @@ class Searcher implements ShouldQueue
} }
// We should close our curl handle before we do so // We should close our curl handle before we do so
curl_close($this->ch); curl_close($this->ch);
Log::info("Exiting here!");
} }
private function initCurlHandle(){ private function initCurlHandle(){
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment